code | repo_name | path | language | license | size |
---|---|---|---|---|---|
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.translation import gettext as _
from django.views.generic import TemplateView
from .components import DashboardBaseMixin, DashboardAppMixin
from .dashboard_widgets import DashboardWidgets
from .shortcuts import check_permissions
class IndexView(DashboardBaseMixin, TemplateView):
template_name = 'dashboard/overview.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
user = self.request.user
active_widgets_for_current_user = []
#Check the permissions for each widget...
for widget in DashboardWidgets.active_widgets:
#...only add widgets with matching permissions
if check_permissions(user, widget.permissions):
active_widgets_for_current_user.append(widget)
context['widgets'] = active_widgets_for_current_user
return context
class PersonalDashboardMixin(DashboardAppMixin):
app_name_verbose = _('Meine Übersicht')
app_name = 'my'
@property
def sidebar_links(self):
links = [
(_('Start'), reverse_lazy("dashboard:personal_overview"))
]
from clothing.models import Settings
clothing_settings = Settings.instance()
if clothing_settings is not None and clothing_settings.clothing_ordering_enabled:
links.append((_('Kleidungsbestellung'), reverse_lazy('clothing:overview')))
return links
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
from staff.models import Person
context['person'] = Person.get_by_user(self.request.user)
return context
class PermissionMissingView(DashboardBaseMixin, TemplateView):
template_name = 'dashboard/permission_required.html'
class PersonalOverview(PersonalDashboardMixin, TemplateView):
template_name = 'dashboard/personal_overview.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
person = context['person']
if person is not None:
context['person_registered'] = True
            context['next_events'] = [
                attendance.event
                for attendance in person.attendance_set.filter(
                    event__end__gte=timezone.now()
                ).select_related('event')
            ]
if person.is_tutor:
context['tutor_group'] = person.tutorgroup_set.first()
if context['tutor_group'] is not None:
context['tutor_partners'] = ", ".join(
t.get_name() for t in context['tutor_group'].tutors.exclude(id=person.id))
from clothing.models import Order, Settings
context['clothing_orders'] = Order.objects.filter(person=person).select_related('type', 'size', 'color')
clothing_settings = Settings.instance()
if clothing_settings is not None:
                context['show_clothing_order_warning'] = (
                    clothing_settings.clothing_ordering_enabled
                    and Order.user_eligible_but_not_ordered_yet(person)
                )
return context
| d120/pyophase | dashboard/views.py | Python | agpl-3.0 | 3,245 |
import unittest
from juju.utils import juju_config_dir, juju_ssh_key_paths
class TestDirResolve(unittest.TestCase):
def test_config_dir(self):
config_dir = juju_config_dir()
assert 'local/share/juju' in config_dir
def test_juju_ssh_key_paths(self):
public, private = juju_ssh_key_paths()
assert public.endswith('ssh/juju_id_rsa.pub')
assert private.endswith('ssh/juju_id_rsa')
| juju/python-libjuju | tests/unit/test_utils.py | Python | apache-2.0 | 429 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import gettext_lazy as _, ngettext
from reversion.admin import VersionAdmin
from base.models.enums import education_group_categories
from osis_common.models.serializable_model import SerializableModelAdmin, SerializableModel, SerializableModelManager
class EducationGroupAdmin(VersionAdmin, SerializableModelAdmin):
list_display = ('most_recent_acronym', 'start_year', 'end_year', 'changed')
search_fields = ('educationgroupyear__acronym', 'educationgroupyear__partial_acronym')
class EducationGroupManager(SerializableModelManager):
def having_related_training(self, **kwargs):
# .distinct() is necessary if there is more than one training egy related to an education_group
return self.filter(
educationgroupyear__education_group_type__category=education_group_categories.TRAINING,
**kwargs
).distinct()
class EducationGroup(SerializableModel):
objects = EducationGroupManager()
external_id = models.CharField(max_length=100, blank=True, null=True, db_index=True)
changed = models.DateTimeField(null=True, auto_now=True)
start_year = models.ForeignKey(
'AcademicYear',
verbose_name=_('Start academic year'),
related_name='start_years',
on_delete=models.PROTECT
)
end_year = models.ForeignKey(
'AcademicYear',
blank=True,
null=True,
verbose_name=_('Last year of organization'),
related_name='end_years',
on_delete=models.PROTECT
)
@property
def most_recent_acronym(self):
most_recent_education_group = self.educationgroupyear_set.filter(education_group_id=self.id) \
.latest('academic_year__year')
return most_recent_education_group.acronym
def __str__(self):
return "{}".format(self.id)
class Meta:
permissions = (
("add_training", "Can add training"),
("add_minitraining", "Can add mini-training"),
("add_group", "Can add group"),
("delete_training", "Can delete training"),
("delete_minitraining", "Can delete mini-training"),
("delete_group", "Can delete group"),
("change_commonpedagogyinformation", "Can change common pedagogy information"),
("change_pedagogyinformation", "Can change pedagogy information"),
("change_link_data", "Can change link data"),
)
verbose_name = _("Education group")
def clean(self):
        # Check that end_year is greater than or equal to start_year
if self.start_year and self.end_year:
if self.start_year.year > self.end_year.year:
raise ValidationError({
'end_year': _("%(max)s must be greater or equals than %(min)s") % {
"max": _("Last year of organization").title(),
"min": _("Start"),
}
})
| uclouvain/OSIS-Louvain | base/models/education_group.py | Python | agpl-3.0 | 4,326 |
#!/usr/bin/python3
# -*- coding:Utf-8 -*-
# EPUB-Nox: read EPUB files in CLI.
# Copyright (C) 2015 Etienne Nadji <[email protected]>
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Not a CLI EPUB viewer.
This program merges all text files in the EPUB into one and accordingly
fixes the paths of all references, images and CSS.
"""
# Imports ===============================================================#
import os
import sys
import shutil
import xml.etree.ElementTree as etree
import noxlib
# Configuration =========================================================#
# Start a viewer immediately?
VIEW = True
# Viewer command line
VIEWER = "elinks"
# Remove the temporary EPUB unique webpage after reading
BUUURN = True
# Global variables ======================================================#
__author__ = "Etienne Nadji <[email protected]>"
# Functions =============================================================#
def main(epub_file):
epub = noxlib.EPUB()
try:
epub.open(epub_file)
    except noxlib.NoEpubBufferFound:
        print("No EPUB file named {0}".format(epub_file))
return 0
temp_dir = epub.make_working_dir()
if temp_dir:
flat = False
opf_file = False
# Extract the epub
epub.extract()
# Find the OPF file
for f in epub.original_epub["zip_files"]:
if f.endswith(".opf"):
opf_file = f; break
if opf_file:
# Parse the OPF xml file
opf_file = epub.temporary_epub["path"] + opf_file
tree = etree.parse(opf_file)
root = tree.getroot()
# Get <manifest>
for child in root:
if "idpf.org" in child.tag:
if "/opf" in child.tag:
if child.tag.endswith("manifest"):
manifest_root = child; break
# Make lists of interesting files
css_files,img_files,html_files = [],[],[]
for child in manifest_root:
if child.tag.endswith("item"):
if "css" in child.attrib["media-type"]:
css_files.append(child.attrib["href"]); continue
if "html" in child.attrib["media-type"]:
html_files.append(child.attrib["href"]); continue
if "image" in child.attrib["media-type"]:
img_files.append(child.attrib["href"]); continue
if html_files:
flat = True
if flat:
# Guess if OEBPS/OPS is used
epub.guess_destination(None)
if epub.oebps["used"]:
# Creation of the fullpaths of interesting files
# Text files
subfolder,wedontcare = epub.guess_destination("text")
use_text = False
if "Text/" in subfolder:
for i in html_files:
if "Text/" in i: use_text = True; break
if use_text:
html_fullpath = [
"{0}{1}{2}".format(
epub.temporary_epub["path"],
subfolder,
f
)
for f in html_files
]
else:
html_fullpath = [
"{0}{1}{2}{3}".format(
epub.temporary_epub["path"],
epub.oebps["variant"],
os.sep,
f
)
for f in html_files
]
# Images files
subfolder,wedontcare = epub.guess_destination("image")
use_img = False
if "Images/" in subfolder:
for i in img_files:
if "Images/" in i: use_img = True; break
if use_img:
img_fullpath = [
"{0}{1}{2}".format(
epub.temporary_epub["path"],
subfolder,
f
)
for f in img_files
]
else:
img_fullpath = [
"{0}{1}{2}{3}".format(
epub.temporary_epub["path"],
epub.oebps["variant"],
os.sep,
f
)
for f in img_files
]
# CSS files
use_css = False
if "Styles/" in subfolder:
for i in css_files:
if "Styles/" in i: use_css = True; break
else:
html_fullpath = [
"{0}{1}{2}".format(
epub.temporary_epub["path"],
os.sep,f
)
for f in html_files
]
img_files = [
"{0}{1}{2}".format(
epub.temporary_epub["path"],
os.sep,f
)
for f in img_files
]
css_files = [
"{0}{1}{2}".format(
epub.temporary_epub["path"],
os.sep,f
)
for f in css_files
]
# Create the flat epub folder
try:
os.mkdir("unique/")
except FileExistsError:
pass
if css_files:
# Make the css folder and copy all css in it
try:
os.mkdir("unique/css/")
except FileExistsError:
pass
for css in css_files:
if epub.oebps["used"]:
shutil.move(
"{0}{1}{2}{3}".format(
epub.temporary_epub["path"],
epub.oebps["variant"],
os.sep,
css
),
"unique/css/{0}".format(css.split(os.sep)[-1])
)
else:
shutil.move(
"{0}{1}".format(epub.temporary_epub["path"],css),
"unique/css/{0}".format(css.split(os.sep)[-1])
)
if img_files:
# Make the image folder and copy all images in it
try:
os.mkdir("unique/images/")
except FileExistsError:
pass
for img in img_files:
if epub.oebps["used"]:
shutil.move(
"{0}{1}{2}{3}".format(
epub.temporary_epub["path"],
epub.oebps["variant"],
os.sep,
img
),
"unique/images/{0}".format(img.split(os.sep)[-1])
)
else:
shutil.move(
"{0}{1}".format(epub.temporary_epub["path"],img),
"unique/images/{0}".format(img.split(os.sep)[-1])
)
# Flat epub html file
unique = "unique/text.html"
with open(unique,"w") as uf:
# Write all subfiles' contents
for f in html_fullpath:
with open(f,"r") as fo:
write = False
for line in fo:
line = line.rstrip()
if f == html_fullpath[0]:
if "</body>" in line:
write = False
break
else:
write = True
else:
if "<body>" in line:
write = True
continue
if "</body>" in line:
write = False
break
# Write the file's content
if write:
# Fix the <img> src
if "<img" in line and "src" in line:
for imagefile in img_files:
nopath = imagefile.split(os.sep)[-1]
if nopath in line:
line = line.replace(imagefile,"images/{0}".format(nopath))
# Fix the <link> href
if "<link" in line and "text/css" in line:
for cssfile in css_files:
nopath = cssfile.split(os.sep)[-1]
if nopath in line:
line = line.replace(cssfile,"css/{0}".format(nopath))
# Fix the <a> href
if "<a" in line and "#" in line:
for textfile in html_files:
nopath = textfile.split(os.sep)[-1]
if nopath in line:
line = line.replace(textfile,"")
# Write the line
uf.write(line+"\n")
# Ends the unique html file
uf.write("</body>\n</html>")
epub.remove_temp_dir()
# Open the unique html file
global VIEW
if VIEW:
global VIEWER
os.system(
"{0} {1}".format(
VIEWER,
unique
)
)
# Burn them all!
global BUUURN
if BUUURN:
shutil.rmtree("unique/")
return 0
else:
return 1
else:
return 1
# Program ===============================================================#
if __name__ == "__main__":
sys.exit(main(sys.argv[-1]))
# vim:set shiftwidth=4 softtabstop=4:
| etnadji/epub-nox | epub-nox.py | Python | gpl-3.0 | 11,939 |
'''
Copyright (C) 2013 Rasmus Eneman <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from .OnOff import OnOff
from .OnOffDimLevel import OnOffDimLevel
from .OnOffColorWheel import OnOffColorWheel
def getWidget(device, ui='default'):
if 'color_wheel' in device.object.SUPPORTED_ACTIONS:
return OnOffColorWheel(ui=ui, device=device)
elif 'dim_level' in device.object.SUPPORTED_ACTIONS:
return OnOffDimLevel(ui=ui, device=device)
else:
return OnOff(ui=ui) | Pajn/RAXA-Django | backend/widgets/__init__.py | Python | agpl-3.0 | 1,099 |
from pysb import ComponentSet
import pysb.core
import inspect
import numpy
import cStringIO
__all__ = ['alias_model_components', 'rules_using_parameter']
def alias_model_components(model=None):
"""Make all model components visible as symbols in the caller's global namespace"""
if model is None:
model = pysb.core.SelfExporter.default_model
caller_globals = inspect.currentframe().f_back.f_globals
components = dict((c.name, c) for c in model.all_components())
caller_globals.update(components)
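# Illustrative usage (hypothetical component names; a sketch only): after a
# model declares e.g. Monomer('A') and Parameter('kf', 1e-5), calling
# alias_model_components() makes `A` and `kf` available as bare names in the
# calling module's namespace.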
def rules_using_parameter(model, parameter):
"""Return a ComponentSet of rules in the model which make use of the given parameter"""
cset = ComponentSet()
for rule in model.rules:
if rule.rate_forward is parameter or rule.rate_reverse is parameter:
cset.add(rule)
return cset
def synthetic_data(model, tspan, obs_list=None, sigma=0.1):
#from pysb.integrate import odesolve
from pysb.integrate import Solver
solver = Solver(model, tspan)
solver.run()
# Sample from a normal distribution with variance sigma and mean 1
# (randn generates a matrix of random numbers sampled from a normal
# distribution with mean 0 and variance 1)
#
# Note: This modifies yobs_view (the view on yobs) so that the changes
# are reflected in yobs (which is returned by the function). Since a new
# Solver object is constructed for each function invocation this does not
# cause problems in this case.
solver.yobs_view *= ((numpy.random.randn(*solver.yobs_view.shape) * sigma) + 1)
return solver.yobs
def get_param_num(model, name):
for i in range(len(model.parameters)):
if model.parameters[i].name == name:
print i, model.parameters[i]
break
return i
def write_params(model,paramarr, name=None):
""" write the parameters and values to a csv file
    model: a model object
    paramarr: array of parameter values, ordered like model.parameters
name: a string with the name for the file, or None to return the content
"""
if name is not None:
fobj = open(name, 'w')
else:
fobj = cStringIO.StringIO()
for i in range(len(model.parameters)):
fobj.write("%s, %.17g\n"%(model.parameters[i].name, paramarr[i]))
if name is None:
return fobj.getvalue()
def update_param_vals(model, newvals):
"""update the values of model parameters with the values from a dict.
the keys in the dict must match the parameter names
"""
update = []
noupdate = []
for i in model.parameters:
if i.name in newvals:
i.value = newvals[i.name]
update.append(i.name)
else:
noupdate.append(i.name)
return update, noupdate
def load_params(fname):
"""load the parameter values from a csv file, return them as dict.
"""
parmsff = {}
# FIXME: This might fail if a parameter name is larger than 50 characters.
# FIXME: Maybe do this with the csv module instead?
temparr = numpy.loadtxt(fname, dtype=([('a','S50'),('b','f8')]), delimiter=',')
for i in temparr:
parmsff[i[0]] = i[1]
return parmsff
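# Illustrative round trip (hypothetical parameter names; a sketch only):
# write_params emits one "<name>, <value>" line per parameter, e.g.
#     kf, 1.0000000000000001e-05
#     kr, 0.10000000000000001
# and load_params() on such a file returns {'kf': 1e-05, 'kr': 0.1}.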
| neurord/pysb | pysb/util.py | Python | bsd-2-clause | 3,112 |
import parser
import logging
def test(code):
log = logging.getLogger()
parser.parser.parse(code, tracking=True)
print "Programa con 1 var y 1 asignacion bien: "
s = "program id; var beto: int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con 1 var mal: "
s = "program ; var beto: int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa sin vars bien: "
s = "program id; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con var mal: "
s = "program id; var beto int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con var mal: "
s = "program id; var beto: int { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con var mal: "
s = "program id; beto: int; { id = 1234; }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con bloque vacio bien: "
s = "program id; var beto: int; { }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con bloque lleno y estatuto mal: "
s = "program id; var beto: int; { id = 1234; id2 = 12345 }"
test(s)
print "Original: \n{0}".format(s)
print "\n"
print "Programa con bloque lleno y condicion mal: "
s = "program id; var beto: int; { id = 1234; if ( 8 > 3 ) { id3 = 34234; } else { } }"
test(s)
print "\n"
print "Original: \n{0}".format(s)
| betoesquivel/PLYpractice | testingParser.py | Python | mit | 1,412 |
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import vigra
from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.utility.io_util.RESTfulBlockwiseFileset import RESTfulBlockwiseFileset
import logging
logger = logging.getLogger(__name__)
class OpRESTfulBlockwiseFilesetReader(Operator):
"""
Adapter that provides an operator interface to the BlockwiseFileset class for reading ONLY.
"""
name = "OpRESTfulBlockwiseFilesetReader"
DescriptionFilePath = InputSlot(stype='filestring')
Output = OutputSlot()
class MissingDatasetError(Exception):
pass
def __init__(self, *args, **kwargs):
super(OpRESTfulBlockwiseFilesetReader, self).__init__(*args, **kwargs)
self._blockwiseFileset = None
def setupOutputs(self):
if not os.path.exists(self.DescriptionFilePath.value):
raise OpRESTfulBlockwiseFilesetReader.MissingDatasetError("Dataset description not found: {}".format( self.DescriptionFilePath.value ) )
# Load up the class that does the real work
self._blockwiseFileset = RESTfulBlockwiseFileset( self.DescriptionFilePath.value )
# Check for errors in the description file
localDescription = self._blockwiseFileset.compositeDescription.local_description
axes = localDescription.axes
        assert False not in map(lambda a: a in 'txyzc', axes), "Unknown axis type. Known axes: txyzc. Your axes: {}".format(axes)
self.Output.meta.shape = tuple(localDescription.view_shape)
self.Output.meta.dtype = localDescription.dtype
self.Output.meta.axistags = vigra.defaultAxistags(localDescription.axes)
drange = localDescription.drange
if drange is not None:
self.Output.meta.drange = drange
def execute(self, slot, subindex, roi, result):
assert slot == self.Output, "Unknown output slot"
self._blockwiseFileset.readData( (roi.start, roi.stop), result )
return result
def propagateDirty(self, slot, subindex, roi):
assert slot == self.DescriptionFilePath, "Unknown input slot."
self.Output.setDirty( slice(None) )
def cleanUp(self):
import sys
if self._blockwiseFileset is not None:
self._blockwiseFileset.close()
super(OpRESTfulBlockwiseFilesetReader, self).cleanUp()
| stuarteberg/lazyflow | lazyflow/operators/ioOperators/opRESTfulBlockwiseFilesetReader.py | Python | lgpl-3.0 | 3,408 |
#!/usr/bin/env python3
import subprocess
import requests
def location() -> (float, float):
try:
process = subprocess.run(
['CoreLocationCLI', '-once', 'YES',
'-format', '%latitude\n%longitude'],
stdout=subprocess.PIPE,
encoding='utf8',
timeout=2,
)
except FileNotFoundError:
print("CoreLocationCLI not installed")
return (None, None)
lines = str(process.stdout).splitlines()
if len(lines) < 2:
return (None, None)
latitude = lines[0]
longitude = lines[1]
return (latitude, longitude)
def first_value(d: dict):
return d['values'][0]['value']
def weather(latitude: float, longitude: float) -> dict:
response = requests.get(
'https://api.weather.gov/points/{lat},{lon}'
.format(lat=latitude, lon=longitude),
timeout=5,
)
forecast_url = response.json()['properties']['forecastGridData']
response = requests.get(forecast_url, timeout=5)
json = response.json()
actual_lat, actual_lon = json['geometry']['coordinates'][0][0]
props = json['properties']
return {
'code': 0,
'coordinates': {
'latitude': actual_lat,
'longitude': actual_lon,
},
'humidity': first_value(props['relativeHumidity']) / 100.0,
'apparentTemperature': first_value(props['apparentTemperature']),
'temperature': first_value(props['temperature']),
'wind': {
'chill': first_value(props['windChill']),
'direction': first_value(props['windDirection']),
'speed': first_value(props['windSpeed']),
},
'units': {
'temperature': 'C',
'apparentTemperature': 'C',
},
}
last_location = (None, None)
def raw_weather_info():
global last_location
latitude, longitude = location()
error = None
if latitude is None or longitude is None:
error = 'Error getting current location.'
if last_location[0] is None or last_location[1] is None:
return {'error': error}
latitude = last_location[0]
longitude = last_location[1]
else:
last_location = (latitude, longitude)
try:
info = weather(latitude, longitude)
if error is not None:
info['error'] = error
return info
except Exception as e:
return {'error': 'Error parsing weather information: ' + str(e)}
if __name__ == '__main__':
print(raw_weather_info())
| JohnStarich/dotfiles | python/johnstarich/weather/parse.py | Python | apache-2.0 | 2,543 |
import operator
import distance
# distance_func = distance.hamming_distance
distance_func = distance.euclidean_distance
# inputData and dataPool entries must be numeric vectors of the same length
def kNN(inputData, dataPool, labelPool, k):
distance = list()
for data in dataPool:
distance.append(distance_func(inputData, data))
sorted_index = sorted(range(len(distance)), key = lambda k: distance[k])
k_index = sorted_index[:k]
neighbor_labels = [labelPool[index] for index in k_index]
# print(neighbor_labels)
neighbor_labels_count_dict = dict()
for label in neighbor_labels:
if label not in neighbor_labels_count_dict:
neighbor_labels_count_dict[label] = 1
else:
neighbor_labels_count_dict[label] += 1
# print(neighbor_labels_count_dict)
sorted_dict = sorted(neighbor_labels_count_dict.items(), key = operator.itemgetter(1))
# print(sorted_dict)
return sorted_dict[-1][0]
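# Illustrative usage (made-up data; a sketch only):
#     dataPool = [[1, 1], [2, 2], [8, 9], [9, 8]]
#     labelPool = ['a', 'a', 'b', 'b']
#     kNN([8, 8], dataPool, labelPool, 3)   # -> 'b' (two of the three nearest are 'b')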
| longtengz/pyml | kNearestNeighbors/kNN.py | Python | mit | 948 |
import json
import pickle
import numpy as np
import pandas as pd
import datatables.traveltime
def write_model(baserate, model_file):
"""
Write model to file
baserate -- average travel time
output_file -- file
"""
model_params = {
'baserate': baserate
}
model_str = json.dumps(model_params)
with open(model_file, 'w') as out_f:
out_f.write(model_str)
def load_model(model_file):
"""
Load linear model from file
model_file -- file
returns -- baserate
"""
with open(model_file, 'r') as model_f:
model_str = model_f.read()
model_params = json.loads(model_str)
return model_params['baserate']
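# The serialized model is a one-key JSON document, e.g. (illustrative value only):
#     {"baserate": 23.7}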
def train(train_data_file, model_file):
data = datatables.traveltime.read_xs(train_data_file)
y = data['y'].values # travel times
# use mean value as baserate prediction
baserate = np.mean(y)
write_model(baserate, model_file)
def predict(model_file, test_xs_file, output_file):
baserate = load_model(model_file)
data = datatables.traveltime.read_xs(test_xs_file)
num_rows = data.shape[0]
# predict constant baserate for every row
y_pred = np.full(num_rows, baserate)
data['pred'] = y_pred
datatables.traveltime.write_pred(data, output_file)
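# Typical pipeline usage (hypothetical file names; a sketch only):
#     train('train_xs.csv', 'baserate_model.json')
#     predict('baserate_model.json', 'test_xs.csv', 'predictions.csv')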
| anjsimmo/simple-ml-pipeline | learners/traveltime_baserate.py | Python | mit | 1,292 |
import re
def fileNameTextToFloat(valStr, unitStr):
# if there's a 'p' character, then we have to deal with decimal vals
if 'p' in valStr:
regex = re.compile(r"([0-9]+)p([0-9]+)")
wholeVal = regex.findall(valStr)[0][0]
decimalVal = regex.findall(valStr)[0][1]
baseVal = 1.0*int(wholeVal) + 1.0*int(decimalVal)/10**len(decimalVal)
else:
baseVal = 1.0*int(valStr)
if unitStr == "G":
multiplier = 1e9
elif unitStr == "M":
multiplier = 1e6
    elif unitStr in ("k", "K"):
multiplier = 1e3
else:
multiplier = 1.0
return baseVal * multiplier
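# For example (a sketch of the intended conversions):
#     fileNameTextToFloat("2p45", "G") -> 2.45e9
#     fileNameTextToFloat("915", "M")  -> 915000000.0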
class iqFileObject():
def __init__(self, prefix = None, centerFreq = None,
sampRate = None, fileName = None):
# if no file name is specified, store the parameters
if fileName is None:
self.prefix = prefix
self.centerFreq = centerFreq
self.sampRate = sampRate
# if the file name is specified, we must derive the parameters
# from the file name
else:
# first check if we have a simple file name or a name+path
            regex = re.compile(r"/")
            if regex.search(fileName):
# separate the filename from the rest of the path
regex = re.compile(r"\/([a-zA-Z0-9_.]+)$")
justName = regex.findall(fileName)[0]
else:
justName = fileName
# get the substrings representing the values
regex = re.compile(r"_c([0-9p]+)([GMK])_s([0-9p]+)([GMk])\.iq$")
paramList = regex.findall(justName)
try:
centerValStr = paramList[0][0]
centerUnitStr = paramList[0][1]
sampValStr = paramList[0][2]
sampUnitStr = paramList[0][3]
self.centerFreq = fileNameTextToFloat(centerValStr, centerUnitStr)
self.sampRate = fileNameTextToFloat(sampValStr, sampUnitStr)
except:
return
def fileName(self):
tempStr = self.prefix
# add center frequency
# first determine if we should use k, M, G or nothing
# then divide by the appropriate unit
if self.centerFreq > 1e9:
unitMag = 'G'
wholeVal = int(1.0*self.centerFreq/1e9)
decimalVal = (1.0*self.centerFreq - 1e9*wholeVal)
decimalVal = int(decimalVal/1e7)
elif self.centerFreq > 1e6:
unitMag = 'M'
wholeVal = int(1.0*self.centerFreq/1e6)
decimalVal = (1.0*self.centerFreq - 1e6*wholeVal)
decimalVal = int(decimalVal/1e4)
elif self.centerFreq > 1e3:
unitMag = 'k'
wholeVal = int(1.0*self.centerFreq/1e3)
decimalVal = (1.0*self.centerFreq - 1e3*wholeVal)
decimalVal = int(decimalVal/1e1)
        else:
            unitMag = ''
            wholeVal = int(self.centerFreq)
            decimalVal = 0
if decimalVal == 0:
tempStr += "_c{}{}".format(wholeVal, unitMag)
else:
tempStr += "_c{}p{}{}".format(wholeVal, decimalVal, unitMag)
# do the same thing for the sample rate
if self.sampRate > 1e6:
unitMag = 'M'
wholeVal = int(1.0*self.sampRate/1e6)
decimalVal = (1.0*self.sampRate - 1e6*wholeVal)
decimalVal = int(decimalVal/1e4)
        elif self.sampRate > 1e3:
            unitMag = 'k'
            wholeVal = int(1.0*self.sampRate/1e3)
            decimalVal = (1.0*self.sampRate - 1e3*wholeVal)
            decimalVal = int(decimalVal/1e1)
        else:
            unitMag = ''
            wholeVal = int(self.sampRate)
            decimalVal = 0
if decimalVal == 0:
tempStr += "_s{}{}".format(wholeVal, unitMag)
else:
tempStr += "_s{}p{}{}".format(wholeVal, decimalVal, unitMag)
tempStr += ".iq"
return tempStr
| paulgclark/waveconverter | src/iqFileArgParse.py | Python | mit | 3,934 |
# MacroIP_DHCP is part of MacroIP Core. Provides Access to DHCP services through simple
# textual macros.
# Copyright (C) 2014 Nicola Cimmino
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# This service expects a LoPNode connected on serial port ttyUSB0 and set
# to access point mode already (ATAP1). In due time autodiscovery and
# configuration will be built.
#
# We don't really lease addresses from a DHCP server here. Instead our box has
# a pool of IP address aliases that are ours to distribute to our clients.
# This has the benefit of not requiring on-the-fly changes to our network config,
# since in the end we would always need the addresses assigned to this box in
# order to receive the traffic. The disadvantage is that a range of private IPs
# must be reserved for our use.
# This range will come from config and will be specified as a range.
addresses_pool = [ "192.168.0.200", "192.168.0.201", "192.168.0.202", "192.168.0.203", "192.168.0.204",
"192.168.0.205", "192.168.0.206", "192.168.0.207", "192.168.0.208", "192.168.0.209" ]
outputMacrosQueue = []
import sqlite3
databasefile = 'loprandb.sqlite'
def processMacro(clientid, macro):
if macro.startswith("dhcp.lease\\"):
        # For now we have a very simple "DHCP" service: we always assign the IP based on client ID.
        # This works only as a proof of concept since we won't have ids above 9 anyway.
ip_address = addresses_pool[clientid]
setLeasedIP(clientid, ip_address)
print "Leased: " + ip_address + " to " + str(clientid)
outputMacrosQueue.append((clientid, "\\dhcp.ip\\" + str(ip_address) + "\\\\"))
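# Illustrative exchange (a sketch only): client id 3 sending the macro
# "dhcp.lease\" is leased addresses_pool[3] ("192.168.0.203"), and the reply
# macro \dhcp.ip\192.168.0.203\\ is queued for getOutputMacroIPMacro().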
def getOutputMacroIPMacro():
if len(outputMacrosQueue) > 0:
return outputMacrosQueue.pop(0)
else:
return (None, None)
def getIP(clientid):
    dbconnection = sqlite3.connect(databasefile)
    cursor = dbconnection.cursor()
    arguments = (clientid,)
    cursor.execute('SELECT ip_address FROM dhcp_leases WHERE client_id=?', arguments)
    result = cursor.fetchone()
    dbconnection.close()
    if result is not None:
        return result[0]
    return None
def getClientID(ipaddress):
dbconnection = sqlite3.connect(databasefile)
cursor = dbconnection.cursor()
arguments = (ipaddress,)
cursor.execute('SELECT client_id FROM dhcp_leases WHERE ip_address=?', arguments)
result = cursor.fetchone()
if result != None:
clientid = result[0]
else:
clientid = 0
dbconnection.close()
return clientid
def setLeasedIP(clientid, ipaddress):
exiting_clientid = getClientID(ipaddress)
dbconnection = sqlite3.connect(databasefile)
cursor = dbconnection.cursor()
arguments = (clientid, ipaddress)
if exiting_clientid == 0:
cursor.execute('INSERT INTO dhcp_leases (client_id, ip_address) VALUES (?, ?) ', arguments)
else:
cursor.execute('UPDATE dhcp_leases SET client_id=?, timestamp=CURRENT_TIMESTAMP where ip_address=? ', arguments)
dbconnection.commit()
dbconnection.close()
| nicolacimmino/LoP-RAN | LoPAccessPoint/MacroIP_DHCP.py | Python | gpl-3.0 | 3,535 |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2014-2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the refresh user principals command."""
import os
import pwd
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestRefreshUser(TestBrokerCommand):
def test_110_grant_testuser4_root(self):
command = ["grant_root_access", "--user", "testuser4",
"--personality", "utunused/dev"] + self.valid_just_tcm
self.successtest(command)
def test_111_verify_testuser4_root(self):
command = ["show_personality", "--personality", "utunused/dev"]
out = self.commandtest(command)
self.matchoutput(out, "Root Access User: testuser4", command)
command = ["cat", "--personality", "utunused/dev",
"--archetype", "aquilon"]
out = self.commandtest(command)
self.matchoutput(out, "testuser4", command)
def test_200_refresh(self):
command = ["refresh_user"]
err = self.statustest(command)
self.matchoutput(err,
"Duplicate UID: 1236 is already used by testuser3, "
"skipping dup_uid.",
command)
self.matchoutput(err, "Added 3, deleted 1, updated 2 users.", command)
def test_210_verify_all(self):
command = ["show_user", "--all"]
out = self.commandtest(command)
self.matchoutput(out, "testuser1", command)
self.matchoutput(out, "testuser2", command)
self.matchoutput(out, "testuser3", command)
self.matchclean(out, "testuser4", command)
self.matchclean(out, "bad_line", command)
self.matchclean(out, "dup_uid", command)
self.matchclean(out, "foo", command)
self.matchoutput(out, "testbot1", command)
self.matchoutput(out, "testbot2", command)
def test_210_verify_testuser1(self):
command = ["show_user", "--username", "testuser1"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testuser1$', command)
self.searchoutput(out, r'Type: human$', command)
self.searchoutput(out, r'UID: 1234$', command)
self.searchoutput(out, r'GID: 423$', command)
self.searchoutput(out, r'Full Name: test user 1$', command)
self.searchoutput(out, r'Home Directory: /tmp$', command)
def test_210_verify_testuser3(self):
command = ["show_user", "--username", "testuser3"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testuser3$', command)
self.searchoutput(out, r'Type: human$', command)
self.searchoutput(out, r'UID: 1236$', command)
self.searchoutput(out, r'GID: 655$', command)
self.searchoutput(out, r'Full Name: test user 3$', command)
self.searchoutput(out, r'Home Directory: /tmp/foo$', command)
def test_210_verify_testbot1_robot(self):
command = ["show_user", "--username", "testbot1"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testbot1$', command)
self.searchoutput(out, r'Type: robot$', command)
self.searchoutput(out, r'UID: 1337$', command)
self.searchoutput(out, r'GID: 655$', command)
self.searchoutput(out, r'Full Name: test bot 1$', command)
self.searchoutput(out, r'Home Directory: /tmp/bothome1$', command)
def test_210_verify_testbot2_not_robot(self):
command = ["show_user", "--username", "testbot2"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testbot2$', command)
self.searchoutput(out, r'Type: human$', command)
self.searchoutput(out, r'UID: 1338$', command)
self.searchoutput(out, r'GID: 655$', command)
self.searchoutput(out, r'Full Name: test bot 2$', command)
self.searchoutput(out, r'Home Directory: /tmp/bothome2$', command)
def test_220_verify_testuser4_root_gone(self):
command = ["show_personality", "--personality", "utunused/dev"]
out = self.commandtest(command)
self.matchclean(out, "testuser4", command)
command = ["cat", "--personality", "utunused/dev",
"--archetype", "aquilon"]
out = self.commandtest(command)
self.matchclean(out, "testuser4", command)
def test_300_update_testuser3(self):
self.noouttest(["update_user", "--username", "testuser3",
"--uid", "1237", "--gid", "123",
"--full_name", "Some other name",
"--home_directory", "/tmp"] + self.valid_just_sn)
def test_300_update_testbot1(self):
self.noouttest(["update_user", "--username", "testbot1",
"--type", "human"] + self.valid_just_sn)
def test_300_update_testbot2(self):
self.noouttest(["update_user", "--username", "testbot2",
"--type", "robot"] + self.valid_just_sn)
def test_301_verify_testuser3_before_sync(self):
command = ["show_user", "--username", "testuser3"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testuser3$', command)
self.searchoutput(out, r'Type: human$', command)
self.searchoutput(out, r'UID: 1237$', command)
self.searchoutput(out, r'GID: 123$', command)
self.searchoutput(out, r'Full Name: Some other name$', command)
self.searchoutput(out, r'Home Directory: /tmp$', command)
def test_301_verify_testbot1_before_sync(self):
command = ["show_user", "--username", "testbot1"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testbot1$', command)
self.searchoutput(out, r'Type: human$', command)
def test_301_verify_testbot2_before_sync(self):
command = ["show_user", "--username", "testbot2"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testbot2$', command)
self.searchoutput(out, r'Type: robot$', command)
def test_305_refresh_again(self):
command = ['refresh_user', '--incremental']
err = self.partialerrortest(command)
self.matchoutput(err,
'Duplicate UID: 1236 is already used by testuser3, '
'skipping dup_uid.',
command)
self.matchoutput(err,
'Updating human user testuser3 (uid = 1236, was '
'1237; gid = 655, was 123; '
'full_name = test user 3, was Some other name; '
'home_dir = /tmp/foo, was /tmp)',
command)
self.matchoutput(err,
'Updating robot user testbot1 (type = robot, was '
'human)',
command)
self.matchoutput(err,
'Updating human user testbot2 (type = human, was '
'robot)',
command)
def test_310_verify_testuser1_again(self):
command = ["show_user", "--username", "testuser1"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testuser1$', command)
self.searchoutput(out, r'Type: human$', command)
self.searchoutput(out, r'UID: 1234$', command)
self.searchoutput(out, r'GID: 423$', command)
self.searchoutput(out, r'Full Name: test user 1$', command)
self.searchoutput(out, r'Home Directory: /tmp$', command)
def test_310_verify_testuser3_again(self):
command = ["show_user", "--username", "testuser3"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testuser3$', command)
self.searchoutput(out, r'Type: human$', command)
self.searchoutput(out, r'UID: 1236$', command)
self.searchoutput(out, r'GID: 655$', command)
self.searchoutput(out, r'Full Name: test user 3$', command)
self.searchoutput(out, r'Home Directory: /tmp/foo$', command)
def test_310_verify_testbot1_again(self):
command = ["show_user", "--username", "testbot1"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testbot1$', command)
self.searchoutput(out, r'Type: robot$', command)
def test_310_verify_testbot2_again(self):
command = ["show_user", "--username", "testbot2"]
out = self.commandtest(command)
self.searchoutput(out, r'User: testbot2$', command)
self.searchoutput(out, r'Type: human$', command)
def test_310_verify_all_again(self):
command = ["show_user", "--all"]
out = self.commandtest(command)
self.matchoutput(out, "testuser1", command)
self.matchoutput(out, "testuser2", command)
self.matchoutput(out, "testuser3", command)
self.matchclean(out, "testuser4", command)
self.matchclean(out, "bad_line", command)
self.matchclean(out, "dup_uid", command)
self.matchoutput(out, "testbot1", command)
self.matchoutput(out, "testbot2", command)
def test_320_add_users(self):
limit = self.config.getint("broker", "user_delete_limit")
for i in range(limit + 5):
name = "testdel_%d" % i
uid = i + 5000
self.noouttest(["add_user", "--username", name, "--uid", uid,
"--gid", 1000, "--full_name", "Delete test",
"--home_directory", "/tmp"] + self.valid_just_tcm)
def test_321_refresh_refuse(self):
limit = self.config.getint("broker", "user_delete_limit")
command = ["refresh_user"]
out = self.statustest(command)
self.matchoutput(out,
"Cowardly refusing to delete %s users, because "
"it is over the limit of %s. Use the "
"--ignore_delete_limit option to override." %
(limit + 5, limit),
command)
self.matchoutput(out, "deleted 0,", command)
def test_322_verify_still_there(self):
command = ["show_user", "--all"]
out = self.commandtest(command)
limit = self.config.getint("broker", "user_delete_limit")
for i in range(limit + 5):
name = "testdel_%d" % i
self.matchoutput(out, name, command)
def test_323_refresh_override(self):
limit = self.config.getint("broker", "user_delete_limit")
command = ["refresh", "user", "--ignore_delete_limit"]
out = self.statustest(command)
self.matchoutput(out,
"Added 0, deleted %s, updated 0 users." % (limit + 5),
command)
def test_324_verify_all_gone(self):
command = ["show_user", "--all"]
out = self.commandtest(command)
self.matchoutput(out, "testuser1", command)
self.matchoutput(out, "testuser2", command)
self.matchoutput(out, "testuser3", command)
self.matchclean(out, "testuser4", command)
self.matchclean(out, "bad_line", command)
self.matchclean(out, "dup_uid", command)
self.matchclean(out, "testdel_", command)
self.matchoutput(out, "testbot1", command)
self.matchoutput(out, "testbot2", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestRefreshUser)
unittest.TextTestRunner(verbosity=2).run(suite)
| quattor/aquilon | tests/broker/test_refresh_user.py | Python | apache-2.0 | 12,148 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import numpy
from pyscf import gto, dft, lib
from pyscf.dft import radi
from pyscf.grad import rks
def grids_response(grids):
# JCP 98, 5612 (1993); DOI:10.1063/1.464906
mol = grids.mol
atom_grids_tab = grids.gen_atomic_grids(mol, grids.atom_grid,
grids.radi_method,
grids.level, grids.prune)
atm_coords = numpy.asarray(mol.atom_coords() , order='C')
atm_dist = gto.mole.inter_distance(mol, atm_coords)
def _radii_adjust(mol, atomic_radii):
charges = mol.atom_charges()
if grids.radii_adjust == radi.treutler_atomic_radii_adjust:
rad = numpy.sqrt(atomic_radii[charges]) + 1e-200
elif grids.radii_adjust == radi.becke_atomic_radii_adjust:
rad = atomic_radii[charges] + 1e-200
else:
fadjust = lambda i, j, g: g
gadjust = lambda *args: 1
return fadjust, gadjust
rr = rad.reshape(-1,1) * (1./rad)
a = .25 * (rr.T - rr)
a[a<-.5] = -.5
a[a>0.5] = 0.5
def fadjust(i, j, g):
return g + a[i,j]*(1-g**2)
#: d[g + a[i,j]*(1-g**2)] /dg = 1 - 2*a[i,j]*g
def gadjust(i, j, g):
return 1 - 2*a[i,j]*g
return fadjust, gadjust
fadjust, gadjust = _radii_adjust(mol, grids.atomic_radii)
def gen_grid_partition(coords, atom_id):
ngrids = coords.shape[0]
grid_dist = numpy.empty((mol.natm,ngrids))
for ia in range(mol.natm):
dc = coords - atm_coords[ia]
grid_dist[ia] = numpy.linalg.norm(dc,axis=1) + 1e-200
pbecke = numpy.ones((mol.natm,ngrids))
for i in range(mol.natm):
for j in range(i):
g = 1/atm_dist[i,j] * (grid_dist[i]-grid_dist[j])
g = fadjust(i, j, g)
g = (3 - g**2) * g * .5
g = (3 - g**2) * g * .5
g = (3 - g**2) * g * .5
pbecke[i] *= .5 * (1-g + 1e-200)
pbecke[j] *= .5 * (1+g + 1e-200)
dpbecke = numpy.zeros((mol.natm,mol.natm,ngrids,3))
for ia in range(mol.natm):
for ib in range(mol.natm):
if ib != ia:
g = 1/atm_dist[ia,ib] * (grid_dist[ia]-grid_dist[ib])
p0 = gadjust(ia, ib, g)
g = fadjust(ia, ib, g)
p1 = (3 - g **2) * g * .5
p2 = (3 - p1**2) * p1 * .5
p3 = (3 - p2**2) * p2 * .5
s_uab = .5 * (1 - p3 + 1e-200)
t_uab = -27./16 * (1-p2**2) * (1-p1**2) * (1-g**2)
t_uab /= s_uab
t_uab *= p0
# * When grid is on atom ia/ib, ua/ub == 0, d_uba/d_uab may have huge error
# How to remove this error?
# * JCP 98, 5612 (1993); (B8) (B10) miss many terms
uab = atm_coords[ia] - atm_coords[ib]
if ia == atom_id: # dA PA: dA~ib, PA~ia
ua = atm_coords[ib] - coords
d_uab = ua/grid_dist[ib,:,None]/atm_dist[ia,ib]
v = (grid_dist[ia]-grid_dist[ib])/atm_dist[ia,ib]**3
d_uab-= v[:,None] * uab
dpbecke[ia,ia] += (pbecke[ia]*t_uab).reshape(-1,1) * d_uab
else: # dB PB: dB~ib, PB~ia
ua = atm_coords[ia] - coords
d_uab = ua/grid_dist[ia,:,None]/atm_dist[ia,ib]
v = (grid_dist[ia]-grid_dist[ib])/atm_dist[ia,ib]**3
d_uab-= v[:,None] * uab
dpbecke[ia,ia] += (pbecke[ia]*t_uab).reshape(-1,1) * d_uab
if ib != atom_id: # dA PB: dA~atom_id PB~ia D~ib
ua_ub = ((coords-atm_coords[ia])/grid_dist[ia,:,None] -
(coords-atm_coords[ib])/grid_dist[ib,:,None])
ua_ub /= atm_dist[ia,ib]
dpbecke[atom_id,ia] += (pbecke[ia]*t_uab)[:,None] * ua_ub
uba = atm_coords[ib] - atm_coords[ia]
if ib == atom_id: # dA PB: dA~ib PB~ia
ub = atm_coords[ia] - coords
d_uba = ub/grid_dist[ia,:,None]/atm_dist[ia,ib]
v = (grid_dist[ib]-grid_dist[ia])/atm_dist[ia,ib]**3
d_uba-= v[:,None] * uba
dpbecke[ib,ia] += -(pbecke[ia]*t_uab).reshape(-1,1) * d_uba
else: # dB PC: dB~ib, PC~ia and dB PA: dB~ib, PA~ia
ub = atm_coords[ib] - coords
d_uba = ub/grid_dist[ib,:,None]/atm_dist[ia,ib]
v = (grid_dist[ib]-grid_dist[ia])/atm_dist[ia,ib]**3
d_uba-= v[:,None] * uba
dpbecke[ib,ia] += -(pbecke[ia]*t_uab).reshape(-1,1) * d_uba
return pbecke, dpbecke
ngrids = 0
for ia in range(mol.natm):
coords, vol = atom_grids_tab[mol.atom_symbol(ia)]
ngrids += vol.size
coords_all = numpy.zeros((ngrids,3))
w0 = numpy.zeros((ngrids))
w1 = numpy.zeros((mol.natm,ngrids,3))
p1 = 0
for ia in range(mol.natm):
coords, vol = atom_grids_tab[mol.atom_symbol(ia)]
coords = coords + atm_coords[ia]
p0, p1 = p1, p1 + vol.size
coords_all[p0:p1] = coords
pbecke, dpbecke = gen_grid_partition(coords, ia)
z = pbecke.sum(axis=0)
for ib in range(mol.natm): # derivative wrt to atom_ib
dz = dpbecke[ib].sum(axis=0)
w1[ib,p0:p1] = dpbecke[ib,ia]/z[:,None] - (pbecke[ia]/z**2)[:,None]*dz
w1[ib,p0:p1] *= vol[:,None]
w0[p0:p1] = vol * pbecke[ia] / z
return coords_all, w0, w1
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom.extend([
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ])
mol.basis = '6-31g'
mol.build()
mf = dft.RKS(mol)
mf.conv_tol = 1e-14
mf.kernel()
def tearDownModule():
global mol, mf
mol.stdout.close()
del mol, mf
class KnownValues(unittest.TestCase):
def test_finite_diff_rks_grad(self):
g = mf.nuc_grad_method().kernel()
self.assertAlmostEqual(lib.finger(g), -0.049887866191414401, 6)
mf_scanner = mf.as_scanner()
e1 = mf_scanner(mol.set_geom_('O 0. 0. 0.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))
e2 = mf_scanner(mol.set_geom_('O 0. 0. -.0001; 1 0. -0.757 0.587; 1 0. 0.757 0.587'))
self.assertAlmostEqual(g[0,2], (e1-e2)/2e-4*lib.param.BOHR, 4)
def test_different_grids_for_grad(self):
grids1 = dft.gen_grid.Grids(mol)
grids1.level = 1
g = mf.nuc_grad_method().set(grids=grids1).kernel()
self.assertAlmostEqual(lib.finger(g), -0.049837230292484727, 6)
def test_grid_response(self):
mol1 = gto.Mole()
mol1.verbose = 0
mol1.atom = '''
H 0. 0 -0.50001
C 0. 1 .1
O 0. 0 0.5
F 1. .3 0.5'''
mol1.unit = 'B'
mol1.build()
grids1 = dft.gen_grid.Grids(mol1)
c, w0b, w1b = grids_response(grids1)
mol = gto.Mole()
mol.verbose = 0
mol.atom = '''
H 0. 0 -0.5
C 0. 1 .1
O 0. 0 0.5
F 1. .3 0.5'''
mol.unit = 'B'
mol.build()
grids = dft.gen_grid.Grids(mol)
c, w0a, w1a = grids_response(grids)
self.assertAlmostEqual(lib.finger(w1a.transpose(0,2,1)), -13.101186585274547, 10)
mol0 = gto.Mole()
mol0.verbose = 0
mol0.atom = '''
H 0. 0 -0.49999
C 0. 1 .1
O 0. 0 0.5
F 1. .3 0.5'''
mol0.unit = 'B'
mol0.build()
grids0 = dft.gen_grid.Grids(mol0)
c, w0a = grids_response(grids0)[:2]
dw = (w0a-w0b) / .00002
self.assertTrue(abs(dw-w1a[0,:,2]).max() < 1e-5)
coords = []
w0 = []
w1 = []
for c_a, w0_a, w1_a in rks.grids_response_cc(grids):
coords.append(c_a)
w0.append(w0_a)
w1.append(w1_a)
coords = numpy.vstack(coords)
w0 = numpy.hstack(w0)
w1 = numpy.concatenate(w1, axis=2)
self.assertAlmostEqual(lib.finger(w1), -13.101186585274547, 10)
self.assertAlmostEqual(abs(w1-w1a.transpose(0,2,1)).max(), 0, 12)
grids.radii_adjust = radi.becke_atomic_radii_adjust
coords = []
w0 = []
w1 = []
for c_a, w0_a, w1_a in rks.grids_response_cc(grids):
coords.append(c_a)
w0.append(w0_a)
w1.append(w1_a)
coords = numpy.vstack(coords)
w0 = numpy.hstack(w0)
w1 = numpy.concatenate(w1, axis=2)
self.assertAlmostEqual(lib.finger(w1), -163.85086096365865, 9)
def test_get_vxc(self):
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol.basis = '631g'
mol.build()
mf = dft.RKS(mol)
mf.conv_tol = 1e-12
mf.grids.radii_adjust = radi.becke_atomic_radii_adjust
mf.scf()
g = rks.Gradients(mf)
g.grid_response = True
g0 = g.kernel()
dm0 = mf.make_rdm1()
mol0 = gto.Mole()
mol0.verbose = 0
mol0.atom = [
['O' , (0. , 0. ,-0.00001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol0.basis = '631g'
mol0.build()
mf0 = dft.RKS(mol0)
mf0.grids.radii_adjust = radi.becke_atomic_radii_adjust
mf0.conv_tol = 1e-12
e0 = mf0.scf()
denom = 1/.00002 * lib.param.BOHR
mol1 = gto.Mole()
mol1.verbose = 0
mol1.atom = [
['O' , (0. , 0. , 0.00001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
mol1.basis = '631g'
mol1.build()
mf1 = dft.RKS(mol1)
mf1.grids.radii_adjust = radi.becke_atomic_radii_adjust
mf1.conv_tol = 1e-12
e1 = mf1.scf()
self.assertAlmostEqual((e1-e0)*denom, g0[0,2], 6)
# grids response have non-negligible effects for small grids
grids = dft.gen_grid.Grids(mol)
grids.atom_grid = (20,86)
grids.build(with_non0tab=False)
grids0 = dft.gen_grid.Grids(mol0)
grids0.atom_grid = (20,86)
grids0.build(with_non0tab=False)
grids1 = dft.gen_grid.Grids(mol1)
grids1.atom_grid = (20,86)
grids1.build(with_non0tab=False)
xc = 'lda,'
exc0 = dft.numint.nr_rks(mf0._numint, mol0, grids0, xc, dm0)[1]
exc1 = dft.numint.nr_rks(mf1._numint, mol1, grids1, xc, dm0)[1]
grids0_w = copy.copy(grids0)
grids0_w.weights = grids1.weights
grids0_c = copy.copy(grids0)
grids0_c.coords = grids1.coords
exc0_w = dft.numint.nr_rks(mf0._numint, mol0, grids0_w, xc, dm0)[1]
exc0_c = dft.numint.nr_rks(mf1._numint, mol1, grids0_c, xc, dm0)[1]
dexc_t = (exc1 - exc0) * denom
dexc_c = (exc0_c - exc0) * denom
dexc_w = (exc0_w - exc0) * denom
self.assertAlmostEqual(dexc_t, dexc_c+dexc_w, 4)
vxc = rks.get_vxc(mf._numint, mol, grids, xc, dm0)[1]
ev1, vxc1 = rks.get_vxc_full_response(mf._numint, mol, grids, xc, dm0)
p0, p1 = mol.aoslice_by_atom()[0][2:]
exc1_approx = numpy.einsum('xij,ij->x', vxc[:,p0:p1], dm0[p0:p1])*2
exc1_full = numpy.einsum('xij,ij->x', vxc1[:,p0:p1], dm0[p0:p1])*2 + ev1[0]
self.assertAlmostEqual(dexc_t, exc1_approx[2], 2)
self.assertAlmostEqual(dexc_t, exc1_full[2], 5)
xc = 'pbe,'
exc0 = dft.numint.nr_rks(mf0._numint, mol0, grids0, xc, dm0)[1]
exc1 = dft.numint.nr_rks(mf1._numint, mol1, grids1, xc, dm0)[1]
grids0_w = copy.copy(grids0)
grids0_w.weights = grids1.weights
grids0_c = copy.copy(grids0)
grids0_c.coords = grids1.coords
exc0_w = dft.numint.nr_rks(mf0._numint, mol0, grids0_w, xc, dm0)[1]
exc0_c = dft.numint.nr_rks(mf1._numint, mol1, grids0_c, xc, dm0)[1]
dexc_t = (exc1 - exc0) * denom
dexc_c = (exc0_c - exc0) * denom
dexc_w = (exc0_w - exc0) * denom
self.assertAlmostEqual(dexc_t, dexc_c+dexc_w, 4)
vxc = rks.get_vxc(mf._numint, mol, grids, xc, dm0)[1]
ev1, vxc1 = rks.get_vxc_full_response(mf._numint, mol, grids, xc, dm0)
p0, p1 = mol.aoslice_by_atom()[0][2:]
exc1_approx = numpy.einsum('xij,ij->x', vxc[:,p0:p1], dm0[p0:p1])*2
exc1_full = numpy.einsum('xij,ij->x', vxc1[:,p0:p1], dm0[p0:p1])*2 + ev1[0]
self.assertAlmostEqual(dexc_t, exc1_approx[2], 2)
self.assertAlmostEqual(dexc_t, exc1_full[2], 5)
xc = 'pbe0'
grids.radii_adjust = None
grids0.radii_adjust = None
grids1.radii_adjust = None
exc0 = dft.numint.nr_rks(mf0._numint, mol0, grids0, xc, dm0)[1]
exc1 = dft.numint.nr_rks(mf1._numint, mol1, grids1, xc, dm0)[1]
grids0_w = copy.copy(grids0)
grids0_w.weights = grids1.weights
grids0_c = copy.copy(grids0)
grids0_c.coords = grids1.coords
exc0_w = dft.numint.nr_rks(mf0._numint, mol0, grids0_w, xc, dm0)[1]
exc0_c = dft.numint.nr_rks(mf1._numint, mol1, grids0_c, xc, dm0)[1]
dexc_t = (exc1 - exc0) * denom
dexc_c = (exc0_c - exc0) * denom
dexc_w = (exc0_w - exc0) * denom
self.assertAlmostEqual(dexc_t, dexc_c+dexc_w, 4)
vxc = rks.get_vxc(mf._numint, mol, grids, xc, dm0)[1]
ev1, vxc1 = rks.get_vxc_full_response(mf._numint, mol, grids, xc, dm0)
p0, p1 = mol.aoslice_by_atom()[0][2:]
exc1_approx = numpy.einsum('xij,ij->x', vxc[:,p0:p1], dm0[p0:p1])*2
exc1_full = numpy.einsum('xij,ij->x', vxc1[:,p0:p1], dm0[p0:p1])*2 + ev1[0]
self.assertAlmostEqual(dexc_t, exc1_approx[2], 1)
#FIXME: exc1_full is quite different to the finite difference results, why?
self.assertAlmostEqual(dexc_t, exc1_full[2], 2)
def test_range_separated(self):
mf = dft.RKS(mol)
mf.conv_tol = 1e-14
mf.xc = 'wb97x'
mf.kernel()
self.assertAlmostEqual(mf.e_tot, -76.36324624711915, 12)
g = mf.nuc_grad_method().kernel()
self.assertAlmostEqual(lib.finger(g), -0.027003819523762924, 3)
mol1 = gto.M(atom="H; H 1 1.", basis='ccpvdz', verbose=0)
mf = dft.RKS(mol1)
mf.xc = 'wb97x'
mf.kernel()
g = mf.nuc_grad_method().kernel()
self.assertAlmostEqual(lib.finger(g), -0.17166479488374434, 5)
smf = mf.as_scanner()
mol1 = gto.M(atom="H; H 1 1.001", basis='ccpvdz')
mol2 = gto.M(atom="H; H 1 0.999", basis='ccpvdz')
dx = (mol1.atom_coord(1) - mol2.atom_coord(1))[0]
e1 = smf(mol1)
e2 = smf(mol2)
self.assertAlmostEqual((e1-e2)/dx, g[1,0], 5)
if __name__ == "__main__":
print("Full Tests for RKS Gradients")
unittest.main()
| sunqm/pyscf | pyscf/grad/test/test_rks.py | Python | apache-2.0 | 16,013 |
import atexit
from collections import defaultdict
from multiprocessing.pool import ThreadPool
import threading
import ray
from dask.core import istask, ishashable, _execute_task
from dask.local import get_async, apply_sync
from dask.system import CPU_COUNT
from dask.threaded import pack_exception, _thread_get_id
from .callbacks import local_ray_callbacks, unpack_ray_callbacks
from .common import unpack_object_refs
main_thread = threading.current_thread()
default_pool = None
pools = defaultdict(dict)
pools_lock = threading.Lock()
def ray_dask_get(dsk, keys, **kwargs):
"""
A Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask
tasks to a Ray cluster for execution. The scheduler will wait for the
tasks to finish executing, fetch the results, and repackage them into the
appropriate Dask collections. This particular scheduler uses a threadpool
to submit Ray tasks.
This can be passed directly to `dask.compute()`, as the scheduler:
>>> dask.compute(obj, scheduler=ray_dask_get)
You can override the currently active global Dask-Ray callbacks (e.g.
supplied via a context manager), the number of threads to use when
submitting the Ray tasks, or the threadpool used to submit Ray tasks:
>>> dask.compute(
obj,
scheduler=ray_dask_get,
ray_callbacks=some_ray_dask_callbacks,
num_workers=8,
pool=some_cool_pool,
)
Args:
dsk (Dict): Dask graph, represented as a task DAG dictionary.
keys (List[str]): List of Dask graph keys whose values we wish to
compute and return.
ray_callbacks (Optional[list[callable]]): Dask-Ray callbacks.
num_workers (Optional[int]): The number of worker threads to use in
the Ray task submission traversal of the Dask graph.
pool (Optional[ThreadPool]): A multiprocessing threadpool to use to
submit Ray tasks.
Returns:
Computed values corresponding to the provided keys.
"""
num_workers = kwargs.pop("num_workers", None)
pool = kwargs.pop("pool", None)
# We attempt to reuse any other thread pools that have been created within
# this thread and with the given number of workers. We reuse a global
# thread pool if num_workers is not given and we're in the main thread.
global default_pool
thread = threading.current_thread()
if pool is None:
with pools_lock:
if num_workers is None and thread is main_thread:
if default_pool is None:
default_pool = ThreadPool(CPU_COUNT)
atexit.register(default_pool.close)
pool = default_pool
elif thread in pools and num_workers in pools[thread]:
pool = pools[thread][num_workers]
else:
pool = ThreadPool(num_workers)
atexit.register(pool.close)
pools[thread][num_workers] = pool
ray_callbacks = kwargs.pop("ray_callbacks", None)
with local_ray_callbacks(ray_callbacks) as ray_callbacks:
# Unpack the Ray-specific callbacks.
(
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
ray_postsubmit_all_cbs,
ray_finish_cbs,
) = unpack_ray_callbacks(ray_callbacks)
# NOTE: We hijack Dask's `get_async` function, injecting a different
# task executor.
object_refs = get_async(
_apply_async_wrapper(
pool.apply_async,
_rayify_task_wrapper,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
),
len(pool._pool),
dsk,
keys,
get_id=_thread_get_id,
pack_exception=pack_exception,
**kwargs,
)
if ray_postsubmit_all_cbs is not None:
for cb in ray_postsubmit_all_cbs:
cb(object_refs, dsk)
# NOTE: We explicitly delete the Dask graph here so object references
# are garbage-collected before this function returns, i.e. before all
# Ray tasks are done. Otherwise, no intermediate objects will be
# cleaned up until all Ray tasks are done.
del dsk
result = ray_get_unpack(object_refs)
if ray_finish_cbs is not None:
for cb in ray_finish_cbs:
cb(result)
# cleanup pools associated with dead threads.
with pools_lock:
active_threads = set(threading.enumerate())
if thread is not main_thread:
for t in list(pools):
if t not in active_threads:
for p in pools.pop(t).values():
p.close()
return result
def _apply_async_wrapper(apply_async, real_func, *extra_args, **extra_kwargs):
"""
Wraps the given pool `apply_async` function, hotswapping `real_func` in as
the function to be applied and adding `extra_args` and `extra_kwargs` to
`real_func`'s call.
Args:
apply_async (callable): The pool function to be wrapped.
real_func (callable): The real function that we wish the pool apply
function to execute.
*extra_args: Extra positional arguments to pass to the `real_func`.
**extra_kwargs: Extra keyword arguments to pass to the `real_func`.
Returns:
        A wrapper function that will ignore its first `func` argument and
pass `real_func` in its place. To be passed to `dask.local.get_async`.
"""
def wrapper(func, args=(), kwds={}, callback=None): # noqa: M511
return apply_async(
real_func,
args=args + extra_args,
kwds=dict(kwds, **extra_kwargs),
callback=callback,
)
return wrapper
def _rayify_task_wrapper(
key,
task_info,
dumps,
loads,
get_id,
pack_exception,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
):
"""
The core Ray-Dask task execution wrapper, to be given to the thread pool's
`apply_async` function. Exactly the same as `execute_task`, except that it
calls `_rayify_task` on the task instead of `_execute_task`.
Args:
key (str): The Dask graph key whose corresponding task we wish to
execute.
task_info: The task to execute and its dependencies.
dumps (callable): A result serializing function.
loads (callable): A task_info deserializing function.
get_id (callable): An ID generating function.
pack_exception (callable): An exception serializing function.
ray_presubmit_cbs (callable): Pre-task submission callbacks.
ray_postsubmit_cbs (callable): Post-task submission callbacks.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
Returns:
A 3-tuple of the task's key, a literal or a Ray object reference for a
Ray task's result, and whether the Ray task submission failed.
"""
try:
task, deps = loads(task_info)
result = _rayify_task(
task,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
)
id = get_id()
result = dumps((result, id))
failed = False
except BaseException as e:
result = pack_exception(e, dumps)
failed = True
return key, result, failed
def _rayify_task(
task,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
):
"""
Rayifies the given task, submitting it as a Ray task to the Ray cluster.
Args:
task (tuple): A Dask graph value, being either a literal, dependency
key, Dask task, or a list thereof.
key (str): The Dask graph key for the given task.
deps (dict): The dependencies of this task.
ray_presubmit_cbs (callable): Pre-task submission callbacks.
ray_postsubmit_cbs (callable): Post-task submission callbacks.
ray_pretask_cbs (callable): Pre-task execution callbacks.
ray_posttask_cbs (callable): Post-task execution callbacks.
Returns:
A literal, a Ray object reference representing a submitted task, or a
list thereof.
"""
if isinstance(task, list):
# Recursively rayify this list. This will still bottom out at the first
# actual task encountered, inlining any tasks in that task's arguments.
return [
_rayify_task(
t,
key,
deps,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
) for t in task
]
elif istask(task):
# Unpacks and repacks Ray object references and submits the task to the
# Ray cluster for execution.
if ray_presubmit_cbs is not None:
alternate_returns = [
cb(task, key, deps) for cb in ray_presubmit_cbs
]
for alternate_return in alternate_returns:
# We don't submit a Ray task if a presubmit callback returns
# a non-`None` value, instead we return said value.
# NOTE: This returns the first non-None presubmit callback
# return value.
if alternate_return is not None:
return alternate_return
func, args = task[0], task[1:]
# If the function's arguments contain nested object references, we must
# unpack said object references into a flat set of arguments so that
# Ray properly tracks the object dependencies between Ray tasks.
object_refs, repack = unpack_object_refs(args, deps)
# Submit the task using a wrapper function.
object_ref = dask_task_wrapper.options(name=f"dask:{key!s}").remote(
func, repack, key, ray_pretask_cbs, ray_posttask_cbs, *object_refs)
if ray_postsubmit_cbs is not None:
for cb in ray_postsubmit_cbs:
cb(task, key, deps, object_ref)
return object_ref
elif not ishashable(task):
return task
elif task in deps:
return deps[task]
else:
return task
@ray.remote
def dask_task_wrapper(func, repack, key, ray_pretask_cbs, ray_posttask_cbs,
*args):
"""
A Ray remote function acting as a Dask task wrapper. This function will
repackage the given flat `args` into its original data structures using
`repack`, execute any Dask subtasks within the repackaged arguments
(inlined by Dask's optimization pass), and then pass the concrete task
    arguments to the provided Dask task function, `func`.
Args:
func (callable): The Dask task function to execute.
repack (callable): A function that repackages the provided args into
the original (possibly nested) Python objects.
key (str): The Dask key for this task.
ray_pretask_cbs (callable): Pre-task execution callbacks.
        ray_posttask_cbs (callable): Post-task execution callbacks.
*args (ObjectRef): Ray object references representing the Dask task's
arguments.
Returns:
The output of the Dask task. In the context of Ray, a
dask_task_wrapper.remote() invocation will return a Ray object
reference representing the Ray task's result.
"""
if ray_pretask_cbs is not None:
pre_states = [
cb(key, args) if cb is not None else None for cb in ray_pretask_cbs
]
repacked_args, repacked_deps = repack(args)
# Recursively execute Dask-inlined tasks.
actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]
# Execute the actual underlying Dask task.
result = func(*actual_args)
if ray_posttask_cbs is not None:
for cb, pre_state in zip(ray_posttask_cbs, pre_states):
if cb is not None:
cb(key, result, pre_state)
return result
def ray_get_unpack(object_refs):
"""
    Unpacks any nested object references, fetches them with ray.get(), and repacks the results.
Traverses arbitrary data structures.
Args:
object_refs: A (potentially nested) Python object containing Ray object
references.
Returns:
The input Python object with all contained Ray object references
resolved with their concrete values.
"""
if isinstance(object_refs, tuple):
object_refs = list(object_refs)
if isinstance(object_refs, list) and any(not isinstance(x, ray.ObjectRef)
for x in object_refs):
# We flatten the object references before calling ray.get(), since Dask
# loves to nest collections in nested tuples and Ray expects a flat
# list of object references. We repack the results after ray.get()
# completes.
object_refs, repack = unpack_object_refs(*object_refs)
computed_result = ray.get(object_refs)
return repack(computed_result)
else:
return ray.get(object_refs)
def ray_dask_get_sync(dsk, keys, **kwargs):
"""
A synchronous Dask-Ray scheduler. This scheduler will send top-level
(non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will
wait for the tasks to finish executing, fetch the results, and repackage
them into the appropriate Dask collections. This particular scheduler
submits Ray tasks synchronously, which can be useful for debugging.
This can be passed directly to `dask.compute()`, as the scheduler:
>>> dask.compute(obj, scheduler=ray_dask_get_sync)
You can override the currently active global Dask-Ray callbacks (e.g.
supplied via a context manager):
>>> dask.compute(
obj,
scheduler=ray_dask_get_sync,
ray_callbacks=some_ray_dask_callbacks,
)
Args:
dsk (Dict): Dask graph, represented as a task DAG dictionary.
keys (List[str]): List of Dask graph keys whose values we wish to
compute and return.
Returns:
Computed values corresponding to the provided keys.
"""
ray_callbacks = kwargs.pop("ray_callbacks", None)
with local_ray_callbacks(ray_callbacks) as ray_callbacks:
# Unpack the Ray-specific callbacks.
(
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
ray_postsubmit_all_cbs,
ray_finish_cbs,
) = unpack_ray_callbacks(ray_callbacks)
# NOTE: We hijack Dask's `get_async` function, injecting a different
# task executor.
object_refs = get_async(
_apply_async_wrapper(
apply_sync,
_rayify_task_wrapper,
ray_presubmit_cbs,
ray_postsubmit_cbs,
ray_pretask_cbs,
ray_posttask_cbs,
),
1,
dsk,
keys,
**kwargs,
)
if ray_postsubmit_all_cbs is not None:
for cb in ray_postsubmit_all_cbs:
cb(object_refs, dsk)
# NOTE: We explicitly delete the Dask graph here so object references
# are garbage-collected before this function returns, i.e. before all
# Ray tasks are done. Otherwise, no intermediate objects will be
# cleaned up until all Ray tasks are done.
del dsk
result = ray_get_unpack(object_refs)
if ray_finish_cbs is not None:
for cb in ray_finish_cbs:
cb(result)
return result
| richardliaw/ray | python/ray/util/dask/scheduler.py | Python | apache-2.0 | 16,044 |
# MIT License
# Copyright (c) 2015, 2017 Marie Lemoine-Busserolle
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# STDLIB
import logging, os, sys, shutil, pkg_resources, argparse
from datetime import datetime
# LOCAL
# Import config parsing.
from ..configobj.configobj import ConfigObj
# Import custom Nifty functions.
from ..nifsUtils import datefmt, printDirectoryLists, writeList, getParam, interactiveNIFSInput
class GetConfig(object):
"""
A class to get configuration from the command line.
    Configuration for each "Pipeline" and "Step" can come in one of several ways:
- The -i flag is provided. Then, an interactive input session is launched.
- An input file is provided. Configuration is read from that. Each pipeline and
step has its own required config file name.
- The -f flag and either a path or program ID is provided.
    - The -r flag is provided. The last data reduction is repeated. (Or resumed?)
"""
def __init__(self, args, script):
"""Return a getConfig object set up for script that is calling it."""
# Define constants.
# Paths to Nifty data.
self.RECIPES_PATH = pkg_resources.resource_filename('nifty', 'recipes/')
self.RUNTIME_DATA_PATH = pkg_resources.resource_filename('nifty', 'runtimeData/')
self.args = args
self.script = script
if self.script == "nifsPipeline":
self.configFile = "config.cfg"
self.makeConfig()
def makeConfig(self):
"""
Make a configuration file.
"""
# TODO(nat): This is really messy, and could probably be split up better. Find a better way.
# Parse command line options.
self.parser = argparse.ArgumentParser(description='Do a Gemini NIFS data reduction.')
# Create a configuration file interactively
self.parser.add_argument('-i', '--interactive', dest = 'interactive', default = False, action = 'store_true', help = 'Create a config.cfg file interactively.')
# Ability to repeat the last data reduction
self.parser.add_argument('-r', '--repeat', dest = 'repeat', default = False, action = 'store_true', help = 'Repeat the last data reduction, loading saved reduction parameters from runtimeData/config.cfg.')
# Ability to load a built-in configuration file (recipe)
        self.parser.add_argument('-l', '--recipe', dest = 'recipe', action = 'store', help = 'Load data reduction parameters from a provided recipe. Default is default_input.cfg.')
# Ability to load your own configuration file
self.parser.add_argument(dest = 'inputfile', nargs='?', action = 'store', help = 'Load data reduction parameters from <inputfile>.cfg.')
# Ability to do a quick and dirty fully automatic data reduction with no user input
self.parser.add_argument('-f', '--fullReductionPathOrProgramID', dest = 'fullReduction', default = False, action = 'store', help = 'Do a quick reduction from recipes/defaultConfig.cfg, specifying path to raw data or program ID.')
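        # Illustrative invocations (not exhaustive): '-i' starts an interactive
        # session, a bare 'myConfig.cfg' argument loads a handmade file, and
        # '-f <path or program ID>' triggers a quick default reduction; an
        # argument starting with 'G' is treated as a program ID further below.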
self.args = self.parser.parse_args(self.args)
self.interactive = self.args.interactive
self.repeat = self.args.repeat
self.fullReduction = self.args.fullReduction
self.inputfile = self.args.inputfile
if self.inputfile:
            # Load input from a .cfg file the user specified at the command line.
if self.inputfile != self.configFile and os.path.exists('./'+ self.configFile):
os.remove('./'+ self.configFile)
shutil.copy(self.inputfile, './'+ self.configFile)
logging.info("\nPipeline configuration for this data reduction was read from " + str(self.inputfile) + \
", and if not named config.cfg, copied to ./config.cfg.")
# Check if the user specified at command line to repeat the last Reduction, do a full default data reduction from a
# recipe file or do a full data reduction from a handmade file.
if self.interactive:
# Get user input interactively.
logging.info('\nInteractively creating a ./config.cfg configuration file.')
self.fullReduction = interactiveNIFSInput()
if self.fullReduction:
# Copy default input and use it
if os.path.exists('./' + self.configFile):
os.remove('./' + self.configFile)
shutil.copy(self.RECIPES_PATH+'defaultConfig.cfg', './'+ self.configFile)
# Update default config file with path to raw data or program ID.
with open('./' + self.configFile, 'r') as self.config_file:
self.config = ConfigObj(self.config_file, unrepr=True)
self.sortConfig = self.config['sortConfig']
if self.fullReduction[0] == "G":
# Treat it as a program ID.
self.sortConfig['program'] = self.fullReduction
self.sortConfig['rawPath'] = ""
else:
# Else treat it as a path.
self.sortConfig['program'] = ""
self.sortConfig['rawPath'] = self.fullReduction
with open('./' + self.configFile, 'w') as self.outfile:
self.config.write(self.outfile)
logging.info("\nData reduction parameters for this reduction were copied from recipes/defaultConfig.cfg to ./config.cfg.")
| mrlb05/Nifty | nifty/pipeline/objectoriented/GetConfig.py | Python | mit | 6,414 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('runners', '0005_auto_20151106_0404'),
]
operations = [
migrations.RemoveField(
model_name='runtime',
name='architecture',
),
migrations.AddField(
model_name='runtime',
name='name',
field=models.CharField(default='error', max_length=8),
preserve_default=False,
),
]
| Turupawn/website | runners/migrations/0006_auto_20151111_0837.py | Python | agpl-3.0 | 561 |
import os
from callbacks.fsns_bbox_plotter import FSNSBBOXPlotter
# os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
# os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
import datetime
import train_utils as utils
from callbacks.save_bboxes import BBOXPlotter
from data_io.file_iter import FileBasedIter, _load_image
from data_io.fsns_file_iter import FSNSFileIter
from data_io.lstm_iter import InitStateLSTMIter
from initializers.spn_initializer import SPNInitializer, ShapeAgnosticLoad
from metrics.base import STNAccuracy, STNCrossEntropy
from metrics.ctc_metrics import CTCLoss, CTCSTNAccuracy
from metrics.fsns_metrics import FSNSPretrainAccuracy, FSNSPretrainCrossEntropy
from networks.fsns import FSNSNetwork
from utils.create_gif import make_gif
from utils.create_video import make_video
from utils.datatypes import Size
from operations.debug import *
from operations.ones import *
from operations.disable_shearing import *
from utils.plot_log import LogPlotter
if __name__ == '__main__':
parser = utils.parse_args()
args = parser.parse_args()
if args.send_bboxes and args.ip is None:
parser.print_usage()
raise ValueError("You must specify an upstream ip if you want to send the bboxes of each iteration")
time = datetime.datetime.now().isoformat()
args.log_dir = os.path.join(args.log_dir, "{}_{}".format(time, args.log_name))
args.log_file = os.path.join(args.log_dir, 'log')
image_size = Size(width=600, height=150)
source_shape = (args.batch_size, 4, 1, image_size.height, image_size.width)
target_shape = Size(width=75, height=50)
num_timesteps = 3
labels_per_timestep = 10
values_per_bbox = 0
num_rnn_layers = 1
label_width = 37
label_width = num_timesteps * labels_per_timestep
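    # NOTE: this overrides the fixed label_width of 37 above; with the defaults
    # here that is 3 timesteps * 10 labels = 30 labels per image.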
use_blstm = True
save_attention = False
eval_metric = mx.metric.CompositeEvalMetric(
metrics=[
STNAccuracy(make_label_time_major=True),
STNCrossEntropy(make_label_time_major=True),
]
)
net, loc, transformed_output, size_params = FSNSNetwork.get_network(
source_shape,
target_shape,
num_timesteps,
num_rnn_layers,
labels_per_timestep,
blstm=use_blstm,
fix_loc=args.fix_loc,
)
group = mx.symbol.Group([loc, transformed_output, net])
if args.plot_network_graph:
print("producing graph")
graph = mx.visualization.plot_network(net)
graph.render(os.path.join(args.log_dir, "graph.pdf"))
print("rendered graph")
print("loading data")
train_iter = InitStateLSTMIter(
base_iter=FSNSFileIter(
args.train_file,
args.batch_size,
label_width,
resize_to=image_size,
base_dir='/',
delimiter='\t',
image_mode='RGB',
),
num_lstm_layers=num_rnn_layers,
blstm=use_blstm,
state_size=256,
batch_size_multipliers=[4, 1],
)
num_images = train_iter.num_data
val_iter = InitStateLSTMIter(
base_iter=FSNSFileIter(
args.val_file,
args.batch_size,
label_width,
resize_to=image_size,
base_dir='/',
delimiter='\t',
image_mode='RGB',
),
num_lstm_layers=num_rnn_layers,
blstm=use_blstm,
state_size=256,
batch_size_multipliers=[4, 1],
)
iterations_per_epoch = num_images // args.batch_size
num_iterations = iterations_per_epoch * args.num_epochs
if args.eval_image:
image_dir = os.path.dirname(args.eval_image)
image_name = os.path.basename(args.eval_image)
bbox_data = _load_image(image_name, image_dir, target_size=image_size)
bbox_label = np.zeros(val_iter.label_shape, dtype=val_iter.label_dtype)
else:
first_batch = next(iter(val_iter))
val_iter.reset()
bbox_data = first_batch.data[0]
bbox_data = bbox_data.asnumpy()[1][np.newaxis, ...]
bbox_label = first_batch.label[0][1].asnumpy()
bbox_plotter = FSNSBBOXPlotter(
image_size,
target_shape,
args.log_dir,
save_attention=save_attention,
show_labels=args.char_map is not None,
plot_extra_loc=False,
send_bboxes=args.send_bboxes,
upstream_ip=args.ip,
upstream_port=args.port,
plot_individual_regions=False,
label_map=args.char_map,
blank_label=args.blank_label,
do_label_majority_vote=False,
)
callbacks = [bbox_plotter.get_callback(group, bbox_data, bbox_label, num_data=num_images, batch_num=1, show_gt_bboxes=False)]
initializer = SPNInitializer(factor_type="in", magnitude=2.34, zoom=args.zoom)
if args.model_prefix is not None:
initializer = ShapeAgnosticLoad(args.model_prefix, default_init=initializer, verbose=True)
# train
print("start training")
utils.fit(args, net, (train_iter, val_iter), num_images / args.batch_size, initializer, eval_metric, batch_end_callback=callbacks)
if hasattr(train_iter, 'shutdown'):
train_iter.shutdown()
if hasattr(val_iter, 'shutdown'):
val_iter.shutdown()
log_plotter = LogPlotter(args.log_file)
plot = log_plotter.plot()
plot.savefig(os.path.join(args.log_dir, 'plot.png'))
if args.gif:
make_gif(
bbox_plotter.bbox_dir,
os.path.join(bbox_plotter.base_dir, "bboxes.gif"),
image_stride=num_iterations // min(num_iterations, 2000)
)
if args.video:
make_video(
bbox_plotter.bbox_dir,
os.path.join(bbox_plotter.base_dir, "bboxes.mpeg"),
)
| Bartzi/stn-ocr | mxnet/train_fsns.py | Python | gpl-3.0 | 5,709 |
'''
Given an input GFF3 file, the database name and a file with the relationship
between the gff3 IDs and the database accessions for each feature the script
adds the dbxref to the GFF3 file
'''
from optparse import OptionParser
from franklin.gff import modify_gff3, create_go_annot_mapper
def parse_options():
'It parses the command line arguments'
parser = OptionParser()
parser.add_option('-i', '--ingff3', dest='ingff',
help='Input GFF3 file')
parser.add_option('-o', '--outgff3', dest='outgff',
help='Output GFF3 file')
parser.add_option('-a', '--annots', dest='annots',
help='b2g annot file')
return parser
def get_parameters():
'It reads and fixes the parsed command line options'
parser = parse_options()
options = parser.parse_args()[0]
if not options.ingff:
parser.error('An input GFF3 file is required')
else:
ingff3_fpath = options.ingff
if not options.outgff:
parser.error('An output GFF3 file is required')
else:
outgff3_fpath = options.outgff
if not options.annots:
parser.error('b2g annot file is required')
else:
annots_fhand = open(options.annots)
return ingff3_fpath, outgff3_fpath, annots_fhand
def main():
'It runs the script'
ingff3_fpath, outgff3_fpath, annots_fhand = get_parameters()
mappers = []
mappers.append(create_go_annot_mapper(annots_fhand))
modify_gff3(ingff3_fpath, outgff3_fpath, mappers)
if __name__ == '__main__':
main()
| JoseBlanca/franklin | scripts/gmod/add_go_term_to_gff3.py | Python | agpl-3.0 | 1,572 |
import os
import numpy as np  # needed below for the DM grid (np.arange)
from frb.cfx import CFX
from frb.utils import find_file
from frb.raw_data import M5, dspec_cat
from frb.search_candidates import Searcher
from frb.dedispersion import de_disperse_cumsum
from frb.search import search_candidates, create_ellipses
from frb.queries import query_frb, connect_to_db
# Setup
exp_code = 'raks12er'
cfx_file = '/home/ilya/code/frb/frb/RADIOASTRON_RAKS12ER_L_20151105T130000_ASC_V1.cfx'
data_dir = '/mnt/frb_data/raw_data/2015_309_raks12er'
dspec_params = {'nchan': 64, 'dt': 1, 'offst': 0, 'dur': 10, 'outfile': None}
# Split an M5-file into [sec] intervals
split_duration = 0.5
cobj = CFX(cfx_file)
cfx_data = cobj.parse_cfx(exp_code)
if cobj.freq == 'K':
print("Skipping K-band CFX file: {}".format(os.path.basename(cfx_file)))
print("NOTE: You can delete following files from data path:")
print(cfx_data)
for fname, params in cfx_data.items():
fname = fname.split(".")[0]
import glob
m5file = glob.glob(os.path.join(os.path.join(data_dir, params[1].lower()),
fname + "*"))[0]
m5file_fmt = params[2] # Raw data format
cfx_fmt = params[-1] # Rec configuration
m5 = M5(m5file, m5file_fmt)
offst = 0
dspec_params.update({'dur': split_duration})
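    # offst is in seconds; multiplying by 32e6 presumably converts it to a byte
    # offset into the M5 file (i.e. a raw data rate of ~32 MB/s is assumed).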
while offst*32e6 < m5.size:
dspec_params.update({'offst':offst})
# print dspec_params
ds = m5.create_dspec(**dspec_params)
# NOTE: all 4 channels are stacked forming dsarr:
dsarr = dspec_cat(os.path.basename(ds['Dspec_file']), cfx_fmt)
metadata = ds
metadata['Raw_data_file'] = fname
metadata['Exp_data'] = params
print "BRV SEARCHING..." # search brv in array here
# TODO: save search results, delete data, ...
offst = offst + split_duration
antennas = list()
antennas = ['AR', 'EF', 'RA']
# Step of de-dispersion
d_dm = 25.
for antenna in antennas:
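    # NOTE: 't' (segment start time) and 'frame' (the dynamic spectrum whose
    # .values are searched below) are not defined in this script as written;
    # they are presumably meant to come from the splitting loop above.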
meta_data = {'antenna': antenna, 'freq': 'L', 'band': 'U', 'pol': 'R',
'exp_code': 'raks00', 'nu_max': 1684., 't_0': t,
'd_nu': 16./256., 'd_t': 0.001}
# Values of DM to de-disperse
dm_grid = np.arange(0., 1000., d_dm)
# Initialize searcher class
searcher = Searcher(dsp=frame.values, meta_data=meta_data)
# Run search for FRB with some parameters of de-dispersion, pre-processing,
# searching algorithms
candidates = searcher.run(de_disp_func=de_disperse_cumsum,
search_func=search_candidates,
preprocess_func=create_ellipses,
de_disp_args=[dm_grid],
search_kwargs={'n_d_x': 5., 'n_d_y': 15.,
'd_dm': d_dm},
preprocess_kwargs={'disk_size': 3,
'threshold_perc': 98.,
'statistic': 'mean'},
db_file='/home/ilya/code/akutkin/frb/frb/frb.db')
print "Found {} pulses".format(len(candidates))
for candidate in candidates:
print candidate
session = connect_to_db("/home/ilya/code/akutkin/frb/frb/frb.db")
# Query DB
frb_list = query_frb(session, exp_code, d_dm=100., d_t=0.1)
for frb in frb_list:
    print(frb)
| ipashchenko/frb | examples/process_one_experiment.py | Python | apache-2.0 | 3,337 |
#!/usr/bin/env python
import sys
import os
from lib.util import safe_mkdir, extract_zip, tempdir, download
SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
FRAMEWORKS_URL = 'https://github.com/atom/atom-shell-frameworks/releases' \
'/download/v0.0.2'
def main():
os.chdir(SOURCE_ROOT)
safe_mkdir('frameworks')
download_and_unzip('Mantle')
download_and_unzip('ReactiveCocoa')
download_and_unzip('Squirrel')
def download_and_unzip(framework):
zip_path = download_framework(framework)
if zip_path:
extract_zip(zip_path, 'frameworks')
def download_framework(framework):
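    # Skips the download when the framework is already present (returns None,
    # so download_and_unzip skips the extraction step as well).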
framework_path = os.path.join('frameworks', framework) + '.framework'
if os.path.exists(framework_path):
return
filename = framework + '.framework.zip'
url = FRAMEWORKS_URL + '/' + filename
download_dir = tempdir(prefix='atom-shell-')
path = os.path.join(download_dir, filename)
download('Download ' + framework, url, path)
return path
if __name__ == '__main__':
sys.exit(main())
| rwaldron/atom-shell | script/update-frameworks.py | Python | mit | 1,042 |
#!/usr/bin/env python3
from sympy import *
from mpmath import *
from matplotlib.pyplot import *
import matplotlib.ticker as plticker
import numpy as np
#init_printing() # make things prettier when we print stuff for debugging.
# ************************************************************************** #
# B-Field, Cylinder Coild with Massive Alu Cylinder #
# ************************************************************************** #
# All values are in standard SI units unless otherwise noted.
# ---------------------------------------------------------#
# Init, Define Variables and Constants #
# ---------------------------------------------------------#
mu0 = 4*pi*1e-7
#sigma = 37.7e6 # conductivity of aluminium (de.wikipedia.org)
rho_kuchling = 0.027e-6 # resistivity Kuchling 17th edition, p.649, tab. 45
sigma_kuchling = 1/rho_kuchling
#sigma = 21.5e6
sigma_abs = 22.5e6 # great fit for magnitude |B|
sigma_arg = 23.5e6 # great fit for phase
r0 = 45e-3
#B0 = 6.9e-2 # adjust this as needed for scaling
B0 = 6.6e-2 # adjust this as needed for scaling
freq = 30 # frequency was fixed at 450 Hz
npts = 1e3
rmin=0
#rmax=50e-3
rmax=45e-3
# -----------------------------------------------------#
# Create a list for convenient printing of vars to #
# file, add LaTeX where necessary. #
# -----------------------------------------------------#
params = [
' ' + r'\textcolor{red}{$\sigma_{Fit,|\hat{B}|}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma_abs) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
' ' + r'\textcolor{red}{$\sigma_{Fit,\angle\hat{B}}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma_arg) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
' ' + r'\textcolor{red}{$\sigma_{Kuch}' + r'$} & \textcolor{red}{$' + '\SI{' + str(sigma_kuchling) + r'}{\ampere\per\volt\per\meter}' + r'$}\\' + "\n",
' ' + '$\mu_0' + '$ & $' + '\SI{' + str(mu0) + r'}{\newton\per\ampere\squared}' + r'$\\' + "\n",
' ' + '$r_0' + '$ & $' + '\SI{' + str(r0) + r'}{\meter}' + r'$\\' + "\n",
' ' + '$r_{max}' + '$ & $' + '\SI{' + str(rmax) + r'}{\meter}' + r'$\\' + "\n",
' ' + '$r_{min}' + '$ & $' + '\SI{' + str(rmin) + r'}{\meter}' + r'$\\' + "\n",
' ' + '$B_0' + '$ & $' + '\SI{' + str(B0) + r'}{\tesla}' + r'$\\' + "\n",
' ' + '$NPTS' + '$ & $' + r'\num{' + str(npts) + '}' + r'$\\' + "\n",
' ' + '$f' + '$ & $' + '\SI{' + str(freq) + r'}{\hertz}' + r'$\\' + "\n",
]
font = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 9,
}
titlefont = {
'family' : 'serif',
'color' : 'black',
'weight' : 'normal',
'size' : 10,
}
plot_legend_fontsize = 9
plot_color_fit = 'blue'
plot_color_measurements = 'black'
plot_label_measurements = 'Messwerte'
plot_size_measurements = 16
plot_scale_x = 'linear'
plot_label_fit = 'Fit-Funktion'
plot_label_x = 'radiale Position bezogen auf Zylinderachse (mm)'
plot_1_label_y = 'gemessene Spannung (mV)'
plot_2_label_y = 'Phase (Grad)'
plot_1_title = r"Exakte L\"osung: Betrag Magnetfeld Spule mit Vollzylinder (30 Hz)"
plot_2_title = r"Exakte L\"osung: Phase Magnetfeld Spule mit Vollzylinder (30 Hz)"
loc = plticker.MultipleLocator(base=5)
# ---------------------------------------------------------#
# Functions #
# #
# See formula 21 on p.11 of script for experiment. #
# #
# NOTE: We use frequency f instead of angular frequency #
# omega since that is what we actually set on the function #
# generator. #
# NOTE: We evaluate B_abs and B_arg based on two different #
# values for sigma, which allows to fit each of the curves #
# more accurately. #
# ---------------------------------------------------------#
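# k is the complex propagation constant, k = sqrt(omega*mu0*sigma/2) * (1 - j)
# with omega = 2*pi*f, and the field profile is B(r) = B0 * J0(k*r) / J0(k*r0)
# (the Bessel-function solution referenced as formula 21 above).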
k_abs = lambda f: sqrt((2*pi*f*mu0*sigma_abs)/2)*(mpc(1,-1))
k_arg = lambda f: sqrt((2*pi*f*mu0*sigma_arg)/2)*(mpc(1,-1))
# Enumerator:
enum_abs = lambda r: besselj(0,k_abs(freq)*r)
denom_abs = besselj(0,k_abs(freq)*r0)
enum_arg = lambda r: besselj(0,k_arg(freq)*r)
denom_arg = besselj(0,k_arg(freq)*r0)
B_abs = lambda r: abs(enum_abs(r) / denom_abs * B0)
B_arg = lambda r: arg(enum_arg(r) / denom_arg * B0)
# ---------------------------------------------------------#
# Generate points for radius axis #
# ---------------------------------------------------------#
radii = np.linspace(rmin,rmax,npts)
# ---------------------------------------------------------#
# Numerically evaluate function #
# ---------------------------------------------------------#
Babsufunc = np.frompyfunc(B_abs,1,1)
B_abs_num = Babsufunc(radii)
Bargufunc = np.frompyfunc(B_arg,1,1)
B_arg_num = Bargufunc(radii)
# ---------------------------------------------------------#
# Unfortunately, the arg() function only delivers values #
# between -pi and +pi for the angle of a complex number, #
# which, while correct, is not suitable for pretty #
# plotting, so we will shift the values larger then zero #
# accordingly for a continuous curve. #
# ---------------------------------------------------------#
B_arg_num = np.unwrap(B_arg_num)
# ---------------------------------------------------------#
# Measurement Values from the actual experiment #
# ---------------------------------------------------------#
radii_measured = np.array([ 0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50])
voltages = np.array([2.86e-2,2.85e-2,2.87e-2,2.9e-2,3e-2,3.3e-2,3.8e-2,4.5e-2,5.4e-2,6.2e-2,3.7e-2])
phases_degrees = np.array([ 111, 109, 104, 94, 81, 65, 48.5, 32, 16, 2.7, 0])
# ---------------------------------------------------------#
# Scale values for improved legibility in plot #
# ---------------------------------------------------------#
# We scale from meters to millimeters, from rad to degress.
B_abs_num = 1e3 * B_abs_num
radii = 1e3 * radii
voltages = 1e3 * voltages
B_arg_num = 180/pi*B_arg_num
rmin = 1e3 * rmin
rmax = 1e3 * rmax
# ---------------------------------------------------------#
# Plot the Things #
# ---------------------------------------------------------#
matplotlib.pyplot.rc('text', usetex=True)
matplotlib.pyplot.rc('font', family='serif')
fig = figure(1)
axes1 = fig.add_subplot(211)
axes1.plot(radii,B_abs_num,color=plot_color_fit,label=plot_label_fit)
axes1.scatter(radii_measured,
voltages,
color=plot_color_measurements,
s=plot_size_measurements,
label=plot_label_measurements
)
axes1.set_xlim([rmin-1,rmax*1.1+5])
axes1.set_xscale(plot_scale_x)
axes1.set_xlabel(plot_label_x,fontdict=font)
axes1.set_ylabel(plot_1_label_y,fontdict=font)
axes1.set_title(plot_1_title,fontdict=titlefont)
axes1.legend(fontsize=plot_legend_fontsize,loc='upper left')
axes1.xaxis.set_major_locator(loc)
axes1.tick_params(labelsize=9)
axes2 = fig.add_subplot(212)
axes2.plot(radii,B_arg_num,color=plot_color_fit,label=plot_label_fit)
axes2.scatter(radii_measured,
-phases_degrees,
color=plot_color_measurements,
s=plot_size_measurements,
label=plot_label_measurements
)
axes2.set_xlim([rmin-1,rmax*1.1+5])
axes2.set_xscale(plot_scale_x)
axes2.set_xlabel(plot_label_x,fontdict=font)
axes2.set_ylabel(plot_2_label_y,fontdict=font)
axes2.set_title(plot_2_title,fontdict=titlefont)
axes2.legend(fontsize=plot_legend_fontsize,loc='upper left')
axes2.xaxis.set_major_locator(loc)
axes2.tick_params(labelsize=9)
fig.subplots_adjust(bottom=0.1,left=0.1,right=0.9,top=0.95,hspace=0.5)
fig.savefig('plots-pgf/massive--alu--low-freq--exact.pgf')
fig.savefig('plots-pdf/massive--alu--low-freq--exact.pdf')
# ---------------------------------------------------------#
# Save listing to file #
# ---------------------------------------------------------#
dumpfile = open('listings/massive--alu--low-freq--exact.tex', 'w')
table_opening = r"""
{%
\begin{center}
\captionof{table}{%
Parameter f\"ur Fit-Funktion aus Abbildung~\ref{fig:alu:rad:low:sensor}
}
\label{tab:fitparams:alu:freq:low:exact}
\sisetup{%
%math-rm=\mathtt,
scientific-notation=engineering,
table-format = +3.2e+2,
round-precision = 3,
round-mode = figures,
}
\begin{tabular}{lr}
\toprule
"""
table_closing = r"""
\bottomrule
\end{tabular}
\end{center}
}
"""
dumpfile.writelines(table_opening)
for line in params:
dumpfile.writelines(line)
dumpfile.writelines(table_closing)
dumpfile.close()
# ---------------------------------------------------------#
# Save Value of sigma to file for error analysis #
# ---------------------------------------------------------#
np.savetxt('numpy-txt/massive--alu--low-freq--exact.txt',([sigma_abs,sigma_arg]))
| alpenwasser/laborjournal | versuche/skineffect/python/vollzylinder_lowfreq.py | Python | mit | 9,786 |
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from threading import Thread
import numpy as np
from Config import Config
class ThreadPredictor(Thread):
def __init__(self, server, id):
super(ThreadPredictor, self).__init__()
self.setDaemon(True)
self.id = id
self.server = server
self.exit_flag = False
def run(self):
ids = np.zeros(Config.PREDICTION_BATCH_SIZE, dtype=np.uint16)
states = np.zeros(
(Config.PREDICTION_BATCH_SIZE, Config.IMAGE_HEIGHT, Config.IMAGE_WIDTH, Config.CHANNELS*Config.STACKED_FRAMES),
dtype=np.float32)
while not self.exit_flag:
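            # Block on the first request, then greedily drain the queue (up to
            # PREDICTION_BATCH_SIZE items) so one network forward pass serves
            # many agents at once.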
ids[0], states[0] = self.server.prediction_q.get()
size = 1
while size < Config.PREDICTION_BATCH_SIZE and not self.server.prediction_q.empty():
ids[size], states[size] = self.server.prediction_q.get()
size += 1
batch = states[:size]
p, v = self.server.model.predict_p_and_v(batch)
for i in range(size):
if ids[i] < len(self.server.agents):
self.server.agents[ids[i]].wait_q.put((p[i], v[i]))
| suqi/gym-sandbox | test_algos/GA3C_TF/ThreadPredictor.py | Python | mit | 2,679 |
#-*- coding: utf-8 -*-
# Implements Colobot model formats
# Copyright (c) 2014 Tomasz Kapuściński
import modelformat
import geometry
import struct
class ColobotNewTextFormat(modelformat.ModelFormat):
def __init__(self):
self.description = 'Colobot New Text format'
def get_extension(self):
return 'txt'
def read(self, filename, model, params):
input_file = open(filename, 'r')
triangle = geometry.Triangle()
materials = []
while True:
line = input_file.readline()
# eof
if len(line) == 0:
break
# comments are ignored
if line[0] == '#':
continue
# remove eol
if line[len(line)-1] == '\n':
line = line[:len(line)-1]
values = line.split(' ');
cmd = values[0]
if cmd == 'version':
model.version = int(values[1])
elif cmd == 'triangles':
continue
elif cmd == 'p1':
triangle.vertices[0] = parse_vertex(values)
elif cmd == 'p2':
triangle.vertices[1] = parse_vertex(values)
elif cmd == 'p3':
triangle.vertices[2] = parse_vertex(values)
elif cmd == 'mat':
triangle.material = parse_material(values)
elif cmd == 'tex1':
triangle.material.texture = values[1]
elif cmd == 'tex2':
triangle.material.texture2 = values[1]
elif cmd == 'var_tex2':
continue
elif cmd == 'lod_level':
triangle.material.lod = int(values[1])
elif cmd == 'state':
triangle.material.state = int(values[1])
mat_final = None
for mat in materials:
if triangle.material == mat:
mat_final = mat
if mat_final is None:
mat_final = triangle.material
materials.append(mat_final)
triangle.material = mat_final
model.triangles.append(triangle)
triangle = geometry.Triangle()
input_file.close()
return True
def write(self, filename, model, params):
output_file = open(filename, 'w')
version = 2
if 'version' in params:
version = int(params['version'])
# write header
output_file.write('# Colobot text model\n')
output_file.write('\n')
output_file.write('### HEAD\n')
output_file.write('version ' + str(version) + '\n')
output_file.write('total_triangles ' + str(len(model.triangles)) + '\n')
output_file.write('\n')
output_file.write('### TRIANGLES\n')
# write triangles
for triangle in model.triangles:
# write vertices
for i in range(3):
vertex = triangle.vertices[i]
output_file.write('p{} c {} {} {}'.format(i+1, vertex.x, vertex.y, vertex.z))
output_file.write(' n {} {} {}'.format(vertex.nx, vertex.ny, vertex.nz))
output_file.write(' t1 {} {}'.format(vertex.u1, vertex.v1))
output_file.write(' t2 {} {}\n'.format(vertex.u2, vertex.v2))
mat = triangle.material
dirt = 'N'
dirt_texture = ''
if 'dirt' in params:
dirt = 'Y'
dirt_texture = params['dirt']
output_file.write('mat dif {} {} {} {}'.format(mat.diffuse[0], mat.diffuse[1], mat.diffuse[2], mat.diffuse[3]))
output_file.write(' amb {} {} {} {}'.format(mat.ambient[0], mat.ambient[1], mat.ambient[2], mat.ambient[3]))
output_file.write(' spc {} {} {} {}\n'.format(mat.specular[0], mat.specular[1], mat.specular[2], mat.specular[3]))
output_file.write('tex1 {}\n'.format(mat.texture))
output_file.write('tex2 {}\n'.format(dirt_texture))
output_file.write('var_tex2 {}\n'.format(dirt))
if version == 1:
output_file.write('lod_level 0\n')
output_file.write('state ' + str(mat.state) + '\n')
output_file.write('\n')
output_file.close()
return True
class ColobotOldFormat(modelformat.ModelFormat):
def __init__(self):
self.description = 'Colobot Old Binary format'
def get_extension(self):
return 'mod'
def read(self, filename, model, params):
input_file = open(filename, 'rb')
# read header
version_major = struct.unpack('=i', input_file.read(4))[0]
version_minor = struct.unpack('=i', input_file.read(4))[0]
triangle_count = struct.unpack('=i', input_file.read(4))[0]
if version_major != 1 or version_minor != 2:
print('Unsupported format version: {}.{}'.format(version_major, version_minor))
return False
# read and ignore padding
input_file.read(40)
materials = []
for index in range(triangle_count):
triangle = geometry.Triangle()
# used, selected, 2 byte padding
input_file.read(4)
for vertex in triangle.vertices:
# position, normal, uvs
floats = struct.unpack('=ffffffffff', input_file.read(40))
vertex.x = floats[0]
vertex.y = floats[1]
vertex.z = floats[2]
vertex.nx = floats[3]
vertex.ny = floats[4]
vertex.nz = floats[5]
vertex.u1 = floats[6]
vertex.v1 = floats[7]
vertex.u2 = floats[8]
vertex.v2 = floats[9]
# material colors
floats = struct.unpack('=fffffffffffffffff', input_file.read(17 * 4))
mat = triangle.material
for i in range(4):
mat.diffuse[i] = floats[0 + i]
mat.ambient[i] = floats[4 + i]
mat.specular[i] = floats[8 + i]
# texture name
chars = input_file.read(20)
for i in range(20):
if chars[i] == '\0':
mat.texture = struct.unpack('={}s'.format(i), chars[:i])[0]
break
values = struct.unpack('=ffiHHHH', input_file.read(20))
mat.state = values[2]
dirt = values[3]
if dirt != 0:
mat.texture2 = 'dirty{:02d}.png'.format(dirt)
# optimizing materials
replaced = False
for material in materials:
if mat == material:
triangle.material = material
replaced = True
break
if not replaced:
materials.append(mat)
model.triangles.append(triangle)
# end of triangle
input_file.close()
return True
def write(self, filename, model, params):
output_file = open(filename, 'wb')
# write header
output_file.write(struct.pack('i', 1)) # version major
output_file.write(struct.pack('i', 2)) # version minor
output_file.write(struct.pack('i', len(model.triangles))) # total triangles
# padding
for x in range(10):
output_file.write(struct.pack('i', 0))
# triangles
for triangle in model.triangles:
output_file.write(struct.pack('=B', True)) # used
output_file.write(struct.pack('=B', False)) # selected ?
output_file.write(struct.pack('=H', 0)) # padding (2 bytes)
# write vertices
for vertex in triangle.vertices:
output_file.write(struct.pack('=fff', vertex.x, vertex.y, vertex.z)) # vertex coord
output_file.write(struct.pack('=fff', vertex.nx, vertex.ny, vertex.nz)) # normal
output_file.write(struct.pack('=ff', vertex.u1, vertex.v1)) # tex coord 1
output_file.write(struct.pack('=ff', vertex.u2, vertex.v2)) # tex coord 2
# material info
mat = triangle.material
output_file.write(struct.pack('=ffff', mat.diffuse[0], mat.diffuse[1], mat.diffuse[2], mat.diffuse[3])) # diffuse color
output_file.write(struct.pack('=ffff', mat.ambient[0], mat.ambient[1], mat.ambient[2], mat.ambient[3])) # ambient color
output_file.write(struct.pack('=ffff', mat.specular[0], mat.specular[1], mat.specular[2], mat.specular[3])) # specular color
output_file.write(struct.pack('=ffff', 0.0, 0.0, 0.0, 0.0)) # emissive color
output_file.write(struct.pack('=f', 0.0)) # power
# texture name
output_file.write(mat.texture.encode('utf-8'))
# texture name padding
for i in range(20 - len(mat.texture)):
output_file.write(struct.pack('=x'))
dirt = 0
if 'dirt' in params:
dirt = int(params['dirt'])
output_file.write(struct.pack('=ff', 0.0, 10000.0)) # rendering range
output_file.write(struct.pack('i', mat.state)) # state
output_file.write(struct.pack('=H', dirt)) # dirt texture
output_file.write(struct.pack('=HHH', 0, 0, 0)) # reserved
output_file.close()
return True
def parse_vertex(values):
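    # Expected token layout (new text format), e.g.:
    #   p1 c <x> <y> <z> n <nx> <ny> <nz> t1 <u1> <v1> t2 <u2> <v2>
    # so indices 2-4 are the position, 6-8 the normal, 10-11 and 13-14 the UVs.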
vertex_coord = geometry.VertexCoord(float(values[2]), float(values[3]), float(values[4]))
normal = geometry.Normal(float(values[6]), float(values[7]), float(values[8]))
tex_coord_1 = geometry.TexCoord(float(values[10]), float(values[11]))
tex_coord_2 = geometry.TexCoord(float(values[13]), float(values[14]))
return geometry.Vertex(vertex_coord, normal, tex_coord_1, tex_coord_2)
def parse_material(values):
material = geometry.Material()
for i in range(4):
material.diffuse[i] = float(values[2+i])
material.ambient[i] = float(values[7+i])
material.specular[i] = float(values[12+i])
return material
modelformat.register_format('colobot', ColobotOldFormat())
modelformat.register_format('old', ColobotOldFormat())
modelformat.register_format('new_txt', ColobotNewTextFormat())
modelformat.register_extension('mod', 'old')
modelformat.register_extension('txt', 'new_txt')
| tomaszkax86/Colobot-Model-Converter | colobotformat.py | Python | bsd-2-clause | 11,044 |
#!/usr/bin/env python
#
# Beautiful Capi generates beautiful C API wrappers for your C++ classes
# Copyright (C) 2015 Petr Petrovich Petrov
#
# This file is part of Beautiful Capi.
#
# Beautiful Capi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Beautiful Capi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Beautiful Capi. If not, see <http://www.gnu.org/licenses/>.
#
import argparse
from xml.dom.minidom import parse
import FileGenerator
def string_to_bool(string_value):
return string_value.lower() in ['true', 'on', 'yes', '1']
def string_to_int(string_value):
return int(string_value)
def get_name_for_field(element):
name = element.getAttribute('name')
if name == 'return':
name = 'return_type'
elif name == 'return_filled':
name = 'return_type_filled'
elif name == 'type':
name = 'type_name'
elif name == 'type_filled':
name = 'type_name_filled'
return name
def copy_source(output_file, code_object):
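    # Copies the source lines of a helper (e.g. string_to_bool above) into the
    # generated file so the emitted parser is self-contained.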
import inspect
for line in inspect.getsourcelines(code_object)[0]:
output_file.put_line(line.strip('\n'))
output_file.put_line('')
output_file.put_line('')
class SchemaGenerator(object):
def __init__(self, input_filename, output_filename):
self.input_xsd = parse(input_filename)
self.output_file = FileGenerator.FileGenerator(output_filename)
def build_python_scripts(self):
self.output_file.put_python_header()
self.output_file.put_python_gnu_gpl_copyright_header()
self.output_file.put_python_automatic_generation_warning()
self.output_file.put_line('from enum import Enum\n\n')
copy_source(self.output_file, string_to_bool)
copy_source(self.output_file, string_to_int)
for simple_type in self.input_xsd.getElementsByTagName('xs:simpleType'):
self.__build_enum(simple_type)
for complex_type in self.input_xsd.getElementsByTagName('xs:complexType'):
self.__build_structure(complex_type)
self.__build_load_root()
del self.output_file
def __build_load_root(self):
self.output_file.put_line('def load(dom_node):')
with FileGenerator.Indent(self.output_file):
for root_element in self.input_xsd.childNodes:
for element in root_element.childNodes:
if element.nodeName == 'xs:element':
self.output_file.put_line(
'for root_element in [root for root in dom_node.childNodes if root.localName == "{0}"]:'
.format(element.getAttribute('name'))
)
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('root_params = {0}()'.format(element.getAttribute('type')))
self.output_file.put_line('root_params.load(root_element)')
self.output_file.put_line('return root_params')
def __build_enum(self, enumerator):
self.output_file.put_line('class {enum_name}(Enum):'.format(enum_name=enumerator.getAttribute('name')))
enum_counter = 0
with FileGenerator.Indent(self.output_file):
for enumeration in enumerator.getElementsByTagName('xs:enumeration'):
self.output_file.put_line('{0} = {1}'.format(
enumeration.getAttribute('value'),
enum_counter)
)
enum_counter += 1
self.output_file.put_line('')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('@staticmethod')
self.output_file.put_line('def load(value):')
with FileGenerator.Indent(self.output_file):
for enumeration in enumerator.getElementsByTagName('xs:enumeration'):
self.output_file.put_line('if value == "{0}":'.format(enumeration.getAttribute('value')))
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('return {0}.{1}'.format(
get_name_for_field(enumerator),
enumeration.getAttribute('value')
))
self.output_file.put_line('raise ValueError')
self.output_file.put_line('')
self.output_file.put_line('')
def __build_structure(self, complex_type):
base_class = 'object'
for cur_base_class in complex_type.getElementsByTagName('xs:extension'):
base_class = cur_base_class.getAttribute('base')
self.output_file.put_line('class {0}({1}):'.format(complex_type.getAttribute('name'), base_class))
with FileGenerator.Indent(self.output_file):
self.__build_structure_impl(complex_type, base_class)
self.output_file.put_line('')
def __build_structure_impl(self, complex_type, base_class):
self.__build_constructor(complex_type, base_class)
self.output_file.put_line('')
self.__build_load_element(complex_type, base_class)
self.output_file.put_line('')
self.__build_load_attributes(complex_type, base_class)
self.output_file.put_line('')
self.__build_load()
self.output_file.put_line('')
@staticmethod
def __get_array_name(name):
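        # Naive English pluralisation for generated list-field names, e.g.
        # 'parameter' -> 'parameters', 'class' -> 'classes', 'property' -> 'properties'.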
if name == 'returns':
return name
suffix = 's'
prefix = name
if name[-1] == 's' or name[-1] == 'x':
suffix = 'es'
elif name[-1] == 'y':
suffix = 'ies'
prefix = name[:-1]
return '{0}{1}'.format(prefix, suffix)
@staticmethod
def __get_attribute_default_value(attribute):
if attribute.getAttribute('type') == 'xs:string':
if attribute.hasAttribute('default'):
return '"' + attribute.getAttribute('default') + '"'
return '""'
if attribute.getAttribute('type') == 'xs:boolean':
if attribute.hasAttribute('default'):
return str(string_to_bool(attribute.getAttribute('default')))
return "False"
if attribute.getAttribute('type') == 'xs:integer':
if attribute.hasAttribute('default'):
return str(string_to_int(attribute.getAttribute('default')))
return 0
if attribute.hasAttribute('default'):
return attribute.getAttribute('type') + '.' + attribute.getAttribute('default')
def __build_constructor(self, complex_type, base_class):
self.output_file.put_line('def __init__(self):')
with FileGenerator.Indent(self.output_file):
if base_class != 'object':
self.output_file.put_line('super().__init__()')
else:
self.output_file.put_line('self.all_items = []')
for attribute in complex_type.getElementsByTagName('xs:attribute'):
self.__build_init_field(attribute)
for element in complex_type.getElementsByTagName('xs:element'):
self.output_file.put_line('self.{0} = []'.format(
SchemaGenerator.__get_array_name(get_name_for_field(element))
))
def __build_init_field(self, field):
self.output_file.put_line('self.{0} = {1}'.format(
get_name_for_field(field),
self.__get_attribute_default_value(field)
))
self.output_file.put_line('self.{0}_filled = False'.format(get_name_for_field(field)))
def __build_load_element_item(self, element, mixed):
self.output_file.put_line('if element.nodeName == "{0}":'.format(element.getAttribute('name')))
with FileGenerator.Indent(self.output_file):
if element.getAttribute('type') == 'xs:string':
self.output_file.put_line('new_element = "{0}"'.format(element.getAttribute('default')))
self.output_file.put_line(
'for text in [text for text in element.childNodes if text.nodeType == text.TEXT_NODE]:'
)
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('new_element += text.nodeValue')
else:
self.output_file.put_line('new_element = {0}()'.format(element.getAttribute('type')))
self.output_file.put_line('new_element.load(element)')
self.output_file.put_line('self.{0}.append(new_element)'.format(
SchemaGenerator.__get_array_name(get_name_for_field(element))
))
if mixed:
self.output_file.put_line('self.all_items.append(new_element)')
self.output_file.put_line('return True')
def __build_load_element(self, complex_type, base_class):
self.output_file.put_line('def load_element(self, element):')
with FileGenerator.Indent(self.output_file):
if base_class != 'object':
self.output_file.put_line('if super().load_element(element):')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('return True')
mixed = complex_type.hasAttribute('mixed') and string_to_bool(complex_type.getAttribute('mixed'))
for element in complex_type.getElementsByTagName('xs:element'):
self.__build_load_element_item(element, mixed)
if mixed:
self.output_file.put_line('if element.nodeType == element.TEXT_NODE:')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line("cur_texts = [text.strip() for text in element.data.split('\\n')]")
self.output_file.put_line('first = True')
self.output_file.put_line('for text in cur_texts:')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line(
'if first and self.all_items and type(self.all_items[-1]) is str:')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('self.all_items[-1] += text')
self.output_file.put_line('else:')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('self.all_items.append(text)')
self.output_file.put_line('first = False')
self.output_file.put_line('return True')
self.output_file.put_line('return False')
def __build_load_attributes(self, complex_type, base_class):
self.output_file.put_line('def load_attributes(self, dom_node):')
with FileGenerator.Indent(self.output_file):
load_is_empty = True
if base_class != 'object':
load_is_empty = False
self.output_file.put_line('super().load_attributes(dom_node)')
for attribute in complex_type.getElementsByTagName('xs:attribute'):
load_is_empty = False
self.output_file.put_line('if dom_node.hasAttribute("{0}"):'.format(attribute.getAttribute('name')))
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('cur_attr = dom_node.getAttribute("{0}")'.format(
attribute.getAttribute('name')
))
if attribute.getAttribute('type') == 'xs:string':
self.output_file.put_line('self.{0} = cur_attr'.format(
get_name_for_field(attribute)
))
elif attribute.getAttribute('type') == 'xs:boolean':
self.output_file.put_line('self.{0} = string_to_bool(cur_attr)'.format(
get_name_for_field(attribute)
))
elif attribute.getAttribute('type') == 'xs:integer':
self.output_file.put_line('self.{0} = string_to_int(cur_attr)'.format(
get_name_for_field(attribute)
))
else:
self.output_file.put_line('self.{0} = {1}.load(cur_attr)'.format(
get_name_for_field(attribute),
attribute.getAttribute('type')
))
self.output_file.put_line('self.{0}_filled = True'.format(get_name_for_field(attribute)))
if load_is_empty:
self.output_file.put_line('pass')
def __build_load(self):
self.output_file.put_line('def load(self, dom_node):')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('for element in dom_node.childNodes:')
with FileGenerator.Indent(self.output_file):
self.output_file.put_line('self.load_element(element)')
self.output_file.put_line('self.load_attributes(dom_node)')
def main():
print(
'Xsd2Python3 Copyright (C) 2015 Petr Petrovich Petrov\n'
'This program comes with ABSOLUTELY NO WARRANTY;\n'
'This is free software, and you are welcome to redistribute it\n'
'under certain conditions.\n')
parser = argparse.ArgumentParser(
prog='Xsd2Python3',
description='This program converts XSD schemas to Python 3 data structures and parser scripts.')
parser.add_argument(
'-i', '--input', nargs=None, default='capi.xsd', metavar='INPUT',
help='specifies input API description file')
parser.add_argument(
'-o', '--output', nargs=None, default='Parser.py', metavar='OUTPUT',
help='specifies generated output Python 3 file')
args = parser.parse_args()
schema_generator = SchemaGenerator(args.input, args.output)
schema_generator.build_python_scripts()
if __name__ == '__main__':
main()
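
# Illustrative invocation (not part of the original script). The file names
# below are simply the argparse defaults declared above; any XSD schema and
# output path could be used:
#
#   python Xsd2Python3.py --input capi.xsd --output Parser.py
#
# The generated Parser.py then exposes one class per XSD complex type, each
# with load()/load_element()/load_attributes() methods for parsing a DOM node.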
| PetrPPetrov/beautiful-capi | source/Xsd2Python3.py | Python | gpl-3.0 | 14,443 |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Engine class for CMU Pocket Sphinx
"""
import contextlib
import logging
import os
import time
import wave
from six import text_type, PY2
from dragonfly import Window
from .dictation import SphinxDictationContainer
from .recobs import SphinxRecObsManager
from .timer import SphinxTimerManager
from .training import write_training_data, write_transcript_files
from ..base import EngineBase, EngineError, MimicFailure
try:
from jsgf import RootGrammar, PublicRule, Literal
from sphinxwrapper import PocketSphinx
from .compiler import SphinxJSGFCompiler
from .grammar_wrapper import GrammarWrapper
from .misc import (EngineConfig, WaveRecognitionObserver,
get_decoder_config_object)
from .recording import PyAudioRecorder
except ImportError:
# Import a few things here optionally for readability (the engine won't
# start without them) and so that autodoc can import this module without
# them.
pass
class UnknownWordError(Exception):
pass
class SphinxEngine(EngineBase):
""" Speech recognition engine back-end for CMU Pocket Sphinx. """
_name = "sphinx"
DictationContainer = SphinxDictationContainer
def __init__(self):
EngineBase.__init__(self)
# Set up the engine logger
logging.basicConfig()
try:
import sphinxwrapper
import jsgf
import pyaudio
except ImportError:
self._log.error("%s: Failed to import jsgf, pyaudio and/or "
"sphinxwrapper. Are they installed?" % self)
raise EngineError("Failed to import Pocket Sphinx engine "
"dependencies.")
# Set the default engine configuration.
# This can be changed later using the config property.
self._config = None
self.config = EngineConfig
# Set other variables
self._decoder = None
self._audio_buffers = []
self.compiler = SphinxJSGFCompiler()
self._recognition_observer_manager = SphinxRecObsManager(self)
self._keyphrase_thresholds = {}
self._keyphrase_functions = {}
self._training_session_active = False
self._default_search_result = None
# Timer-related members.
self._timer_manager = SphinxTimerManager(0.02, self)
self._timer_callback = None
self._timer_interval = None
self._timer_next_time = 0
# Set up keyphrase search names and valid search names for grammars.
self._keyphrase_search_names = ["_key_phrases", "_wake_phrase"]
self._valid_searches = set()
# Recognising loop members.
self._recorder = PyAudioRecorder(self.config)
self._cancel_recognition_next_time = False
self._recognising = False
self._recognition_paused = False
@property
def config(self):
"""
Python module/object containing engine configuration.
You will need to restart the engine with :meth:`disconnect` and
:meth:`connect` if the configuration has been changed after
:meth:`connect` has been called.
:returns: config module/object
"""
return self._config
@config.setter
def config(self, value):
# Validate configuration object.
self.validate_config(value)
self._config = value
@classmethod
def validate_config(cls, engine_config):
# Check configuration options and set defaults where appropriate.
# Set a new decoder config if necessary.
if not hasattr(engine_config, "DECODER_CONFIG"):
setattr(engine_config, "DECODER_CONFIG",
get_decoder_config_object())
options = [
"LANGUAGE",
"START_ASLEEP",
"WAKE_PHRASE",
"WAKE_PHRASE_THRESHOLD",
"SLEEP_PHRASE",
"SLEEP_PHRASE_THRESHOLD",
"TRAINING_DATA_DIR",
"TRANSCRIPT_NAME",
"START_TRAINING_PHRASE",
"START_TRAINING_PHRASE_THRESHOLD",
"END_TRAINING_PHRASE",
"END_TRAINING_PHRASE_THRESHOLD",
"CHANNELS",
"RATE",
"SAMPLE_WIDTH",
"FRAMES_PER_BUFFER",
]
        # Get default values and set them if they are missing.
for option in options:
if hasattr(engine_config, option):
continue
default_value = getattr(EngineConfig, option)
if "PHRASE" in option:
# Disable missing phrases by default if using a language
# other than English.
if not engine_config.LANGUAGE.startswith("en"):
default_value = "" if option.endswith("PHRASE") else 0.0
setattr(engine_config, option, default_value)
def connect(self):
"""
Set up the CMU Pocket Sphinx decoder.
This method does nothing if the engine is already connected.
"""
if self._decoder:
return
# Initialise a new decoder with the given configuration
decoder_config = self._config.DECODER_CONFIG
self._decoder = PocketSphinx(decoder_config)
self._valid_searches.add(self._default_search_name)
# Set up callback function wrappers
def hypothesis(hyp):
# Set default search result.
self._default_search_result = hyp
# Set speech to the hypothesis string or None if there isn't one
speech = hyp.hypstr if hyp else None
return self._hypothesis_callback(speech, False)
def speech_start():
# Reset the default search result and call the engine's callback
# method.
self._default_search_result = None
return self._speech_start_callback(False)
self._decoder.hypothesis_callback = hypothesis
self._decoder.speech_start_callback = speech_start
        # Set up built-in keyphrases if they are set. Catch and log any
        # UnknownWordErrors because all keyphrases are optional.
def get_phrase_values(name):
phrase_attr = name + "_PHRASE"
threshold_attr = name + "_PHRASE_THRESHOLD"
return (getattr(self.config, phrase_attr, ""),
getattr(self.config, threshold_attr, 0))
def safe_set_keyphrase(name, func):
phrase, threshold = get_phrase_values(name)
if phrase and threshold:
try:
self.set_keyphrase(phrase, threshold, func)
except UnknownWordError as e:
self._log.error(e)
# Set the wake phrase using set_kws_list directly because it uses a
# different search.
wake_phrase, wake_threshold = get_phrase_values("WAKE")
if wake_phrase and wake_threshold:
try:
self._validate_words(wake_phrase.split(), "keyphrase")
self._decoder.set_kws_list("_wake_phrase", {
wake_phrase: wake_threshold
})
except UnknownWordError as e:
self._log.error(e)
# Set the other keyphrases using safe_set_keyphrase().
safe_set_keyphrase("SLEEP", self.pause_recognition)
safe_set_keyphrase("START_TRAINING",
self.start_training_session)
safe_set_keyphrase("END_TRAINING",
self.end_training_session)
# Set the PyAudioRecorder instance's config object.
self._recorder.config = self.config
# Start in sleep mode if requested.
if self.config.START_ASLEEP:
self.pause_recognition()
self._log.warning("Starting in sleep mode as requested.")
def _free_engine_resources(self):
"""
Internal method for freeing the resources used by the engine.
"""
# Stop the audio recorder if it is running.
self._recognising = False
self._recorder.stop()
# Free the decoder and clear audio buffers.
self._decoder = None
self._audio_buffers = []
# Reset other variables
self._cancel_recognition_next_time = False
self._training_session_active = False
self._recognition_paused = False
# Clear dictionaries and sets
self._grammar_wrappers.clear()
self._valid_searches.clear()
self._keyphrase_thresholds.clear()
self._keyphrase_functions.clear()
def disconnect(self):
"""
Deallocate the CMU Sphinx decoder and any other resources used by
it.
This method effectively unloads all loaded grammars and key
phrases.
"""
# Free resources if the decoder isn't currently being used to
# recognise, otherwise stop the recognising loop, which will free
# the resources safely.
if not self.recognising:
self._free_engine_resources()
else:
self._recognising = False
self._recorder.stop()
# -----------------------------------------------------------------------
# Multiplexing timer methods.
def create_timer(self, callback, interval):
"""
Create and return a timer using the specified callback and repeat
interval.
**Note**: Timers will not run unless the engine is recognising
audio. Normal threads can be used instead with no downsides.
"""
if not self.recognising:
self._log.warning("Timers will not run unless the engine is "
"recognising audio.")
return super(SphinxEngine, self).create_timer(callback, interval)
def set_timer_callback(self, callback, sec):
""""""
# This method should really only be called by the timer manager, not
# directly.
self._timer_callback = callback
self._timer_interval = sec
self._timer_next_time = time.time()
def _call_timer_callback(self):
        # Both a callback and an interval are required before the timer can fire.
        if not (callable(self._timer_callback) and self._timer_interval):
return
now = time.time()
if self._timer_next_time < now:
self._timer_next_time = now + self._timer_interval
self._timer_callback()
# -----------------------------------------------------------------------
# Methods for working with grammars.
def check_valid_word(self, word):
"""
Check if a word is in the current Sphinx pronunciation dictionary.
This will always return False if :meth:`connect` hasn't been called.
:rtype: bool
"""
if self._decoder:
return bool(self._decoder.lookup_word(word))
return False
def _validate_words(self, words, search_type):
unknown_words = []
# Use 'set' to de-duplicate the 'words' list.
for word in set(words):
if not self.check_valid_word(word):
unknown_words.append(word)
if unknown_words:
# Sort the word list before using it.
unknown_words.sort()
raise UnknownWordError(
"%s used words not found in the pronunciation dictionary: "
"%s" % (search_type, ", ".join(unknown_words)))
def _build_grammar_wrapper(self, grammar):
return GrammarWrapper(grammar, self,
self._recognition_observer_manager)
def _set_grammar(self, wrapper, activate, partial=False):
if not wrapper:
return
# Connect to the engine if it isn't connected already.
self.connect()
def activate_search_if_necessary():
if activate:
self._decoder.end_utterance()
self._decoder.active_search = wrapper.search_name
# Check if the wrapper's search name is valid.
# Set the search (again) if necessary.
valid_search = wrapper.search_name in self._valid_searches
if valid_search and not wrapper.set_search:
# wrapper.search_name is a valid search, so return.
activate_search_if_necessary()
return
# Return early if 'partial' is True as an optimisation to avoid
# recompiling grammars for every rule activation/deactivation.
# Also return if the search doesn't need to be set.
if partial or not wrapper.set_search:
return
# Compile and set the jsgf search.
compiled = wrapper.compile_jsgf()
# Raise an error if there are no active public rules.
if "public <root> = " not in compiled:
raise EngineError("no public rules found in the grammar")
# Check that each word in the grammar is in the pronunciation
# dictionary. This will raise an UnknownWordError if one or more
# aren't.
self._validate_words(wrapper.grammar_words,
"grammar '%s'" % wrapper.grammar.name)
# Set the JSGF search.
self._decoder.end_utterance()
self._decoder.set_jsgf_string(wrapper.search_name, compiled)
activate_search_if_necessary()
# Grammar search has been loaded, so set the wrapper's flag.
wrapper.set_search = False
def _unset_search(self, name):
# Unset a Pocket Sphinx search with the given name.
# Don't unset the default or keyphrase searches.
default_search = self._default_search_name
reserved = [default_search] + self._keyphrase_search_names
if name in reserved:
return
# Unset the Pocket Sphinx search.
if name in self._valid_searches:
# Unfortunately, the C function for doing this (ps_unset_search)
            # is not exposed. Pocket Sphinx searches are pretty lightweight
# however. This would only be an issue on hardware with limited
# memory.
# Remove the search from the valid searches set.
self._valid_searches.remove(name)
# Change to the default search to avoid possible segmentation faults
# from Pocket Sphinx which crash Python.
self._set_default_search()
# TODO Add optional context parameter
def set_keyphrase(self, keyphrase, threshold, func):
"""
Add a keyphrase to listen for.
Key phrases take precedence over grammars as they are processed first.
They cannot be set for specific contexts (yet).
:param keyphrase: keyphrase to add.
:param threshold: keyphrase threshold value to use.
:param func: function or method to call when the keyphrase is heard.
:type keyphrase: str
:type threshold: float
:type func: callable
:raises: UnknownWordError
"""
# Check that all words in the keyphrase are in the pronunciation dictionary.
# This can raise an UnknownWordError.
self._validate_words(keyphrase.split(), "keyphrase")
# Check that the threshold is a float.
if not isinstance(threshold, float):
raise TypeError("threshold must be a float, not %s" % threshold)
# Add parameters to the relevant dictionaries.
self._keyphrase_thresholds[keyphrase] = threshold
self._keyphrase_functions[keyphrase] = func
# Set the keyphrase search (again)
self._decoder.end_utterance()
self._decoder.set_kws_list("_key_phrases", self._keyphrase_thresholds)
def unset_keyphrase(self, keyphrase):
"""
Remove a set keyphrase so that the engine no longer listens for it.
:param keyphrase: keyphrase to remove.
:type keyphrase: str
"""
# Remove parameters from the relevant dictionaries. Don't raise an error
# if there is no such keyphrase.
self._keyphrase_thresholds.pop(keyphrase, None)
self._keyphrase_functions.pop(keyphrase, None)
# Set the keyphrase search (again)
self._decoder.end_utterance()
self._decoder.set_kws_list("_key_phrases", self._keyphrase_thresholds)
def _set_default_search(self):
# Change the active search to the one used for processing speech as
# it is heard.
swap_to_wake_search = (
self.recognition_paused and self.config.WAKE_PHRASE and
self.config.WAKE_PHRASE_THRESHOLD
)
# Ensure we're not processing.
self._decoder.end_utterance()
if swap_to_wake_search:
self._decoder.active_search = "_wake_phrase"
else:
self._decoder.active_search = self._default_search_name
def _load_grammar(self, grammar):
""" Load the given *grammar* and return a wrapper. """
self._log.debug("Engine %s: loading grammar %s."
% (self, grammar.name))
grammar.engine = self
# Dependency checking.
memo = []
for r in grammar.rules:
for d in r.dependencies(memo):
grammar.add_dependency(d)
wrapper = self._build_grammar_wrapper(grammar)
# Check that the engine doesn't already have a grammar with the same
# search name. This will include grammars with the same reference
# name, e.g. "some grammar" and "some_grammar".
if wrapper.search_name in self._valid_searches:
message = "Failed to load grammar %s: multiple grammars with " \
"the same name are not allowed" % grammar
self._log.error(message)
raise EngineError(message)
# Attempt to set the grammar search.
try:
self._set_grammar(wrapper, False)
except UnknownWordError as e:
# Unknown words should be logged as plain error messages, not
# exception stack traces.
self._log.error(e)
raise EngineError("Failed to load grammar %s: %s."
% (grammar, e))
except Exception as e:
self._log.exception("Failed to load grammar %s: %s."
% (grammar, e))
raise EngineError("Failed to load grammar %s: %s."
% (grammar, e))
# Set the grammar wrapper's search name as valid and return the
# wrapper.
self._valid_searches.add(wrapper.search_name)
return wrapper
def _unload_grammar(self, grammar, wrapper):
try:
# Unset the search names for the grammar.
self._unset_search(wrapper.search_name)
except Exception as e:
self._log.exception("Failed to unload grammar %s: %s."
% (grammar, e))
def activate_grammar(self, grammar):
self._log.debug("Activating grammar %s." % grammar.name)
def deactivate_grammar(self, grammar):
self._log.debug("Deactivating grammar %s." % grammar.name)
def activate_rule(self, rule, grammar):
self._log.debug("Activating rule %s in grammar %s."
% (rule.name, grammar.name))
wrapper = self._get_grammar_wrapper(grammar)
if not wrapper:
return
try:
wrapper.enable_rule(rule.name)
self._set_grammar(wrapper, False, True)
except UnknownWordError as e:
self._log.error(e)
except Exception as e:
self._log.exception("Failed to activate grammar %s: %s."
% (grammar, e))
def deactivate_rule(self, rule, grammar):
self._log.debug("Deactivating rule %s in grammar %s."
% (rule.name, grammar.name))
wrapper = self._get_grammar_wrapper(grammar)
if not wrapper:
return
try:
wrapper.disable_rule(rule.name)
self._set_grammar(wrapper, False, True)
except UnknownWordError as e:
self._log.error(e)
except Exception as e:
self._log.exception("Failed to activate grammar %s: %s."
% (grammar, e))
def update_list(self, lst, grammar):
wrapper = self._get_grammar_wrapper(grammar)
if not wrapper:
return
# Unfortunately there is no way to update lists for Pocket Sphinx
# without reloading the grammar, so we'll update the list's JSGF
# rule and reload.
wrapper.update_list(lst)
# Reload the grammar.
try:
self._set_grammar(wrapper, False)
except Exception as e:
self._log.exception("Failed to update list %s: %s."
% (lst, e))
def set_exclusiveness(self, grammar, exclusive):
# Disable/enable each grammar.
for g in self.grammars:
if exclusive:
g.disable()
else:
g.enable()
# Enable the specified grammar if it was supposed to be exclusive.
if exclusive:
grammar.enable()
# -----------------------------------------------------------------------
# Miscellaneous methods.
@property
def recognising(self):
"""
Whether the engine is currently recognising speech.
To stop recognition, use :meth:`disconnect`.
:rtype: bool
"""
return self._recorder.recording or self._recognising
@property
def default_search_result(self):
"""
The last hypothesis object of the default search.
This does not currently reach recognition observers because it is
intended to be used for dictation results, which are currently
disabled. Nevertheless this object can be useful sometimes.
:returns: Sphinx Hypothesis object | None
"""
return self._default_search_result
@property
def _default_search_name(self):
# The name of the Pocket Sphinx search used for processing speech as
# it is heard.
return "_default"
def _get_best_hypothesis(self, hypotheses):
"""
Take a list of speech hypotheses and return the most likely one.
:type hypotheses: iterable
:return: str | None
"""
# Get all distinct, non-null hypotheses.
distinct = tuple([h for h in set(hypotheses) if bool(h)])
if not distinct:
return None
elif len(distinct) == 1:
return distinct[0] # only one choice
# Decide between non-null hypotheses using a Pocket Sphinx search with
# each hypothesis as a grammar rule.
grammar = RootGrammar()
grammar.language_name = self.language
for i, hypothesis in enumerate(distinct):
grammar.add_rule(PublicRule("rule%d" % i, Literal(hypothesis)))
compiled = grammar.compile_grammar()
name = "_temp"
# Store the current search name.
original = self._decoder.active_search
# Note that there is no need to validate words in this case because
# each literal in the _temp grammar came from a Pocket Sphinx
# hypothesis.
self._decoder.end_utterance()
self._decoder.set_jsgf_string(name, compiled)
self._decoder.active_search = name
# Do the processing.
hyp = self._decoder.batch_process(
self._audio_buffers,
use_callbacks=False
)
result = hyp.hypstr if hyp else None
# Switch back to the previous search.
self._decoder.end_utterance() # just in case
self._decoder.active_search = original
return result
def _speech_start_callback(self, mimicking):
# Get context info. Dragonfly has a handy static method for this:
fg_window = Window.get_foreground()
# Call process_begin for all grammars so that any out of context
# grammar will not be used.
for wrapper in self._grammar_wrappers.values():
wrapper.process_begin(fg_window)
if not mimicking:
            # Trim excess audio buffers from the start of the list. Keep at most one
            # second of silence from before speech start was detected. This should
            # help increase the performance of batch reprocessing later.
chunk = self.config.FRAMES_PER_BUFFER
rate = self.config.RATE
seconds = 1
n_buffers = int(rate / chunk * seconds)
self._audio_buffers = self._audio_buffers[-1 * n_buffers:]
# Notify observers
self._recognition_observer_manager.notify_begin()
def _hypothesis_callback(self, speech, mimicking):
"""
        Internal Pocket Sphinx hypothesis callback method. Calls
        _process_hypotheses and does post-processing afterwards.
:param speech: speech hypothesis
:type speech: str | None
:param mimicking: whether to treat speech as mimicked speech.
:rtype: bool
"""
# Clear any recorded audio buffers.
self._recorder.clear_buffers()
# Process speech. We should get back a boolean for whether processing
# occurred as well as the final speech hypothesis.
processing_occurred, final_speech = self._process_hypotheses(
speech, mimicking
)
# Notify observers of failure.
if not processing_occurred:
self._recognition_observer_manager.notify_failure()
# Write the training data files if necessary.
data_dir = self.config.TRAINING_DATA_DIR
if not mimicking and data_dir and os.path.isdir(data_dir):
# Use the default search's hypothesis if final_speech was nil.
if not final_speech:
final_speech = speech
try:
write_training_data(self.config, self._audio_buffers,
final_speech)
except Exception as e:
self._log.exception("Failed to write training data: %s" % e)
# Clear audio buffer list because utterance processing has finished.
self._audio_buffers = []
# Ensure that the correct search is used.
self._set_default_search()
# Return whether processing occurred in case this method was called
# by mimic.
return processing_occurred
def _process_key_phrases(self, speech, mimicking):
"""
        Process the key phrase search and return the matched keyphrase
        (if any).
:type speech: str
:param mimicking: whether to treat speech as mimicked speech.
:rtype: str
"""
# Return if speech is empty/null or if there are no key phrases set.
if not (speech and self._keyphrase_thresholds):
return "" # no matches
if not mimicking:
# Reprocess using the key phrases search
self._decoder.end_utterance()
self._decoder.active_search = "_key_phrases"
hyp = self._decoder.batch_process(self._audio_buffers,
use_callbacks=False)
# Get the hypothesis string.
speech = hyp.hypstr if hyp else ""
# Restore search to the default search.
self._set_default_search()
# Return if no key phrase matched.
if not speech:
return ""
# Handle multiple matching key phrases. This appears to be a
# quirk of how Pocket Sphinx 'kws' searches work. Get the best
# match instead if this is the case.
recognised_phrases = speech.split(" ")
if len(recognised_phrases) > 1:
            # Remove trailing whitespace from the last phrase.
            recognised_phrases[-1] = recognised_phrases[-1].rstrip()
speech = self._get_best_hypothesis(recognised_phrases)
else:
speech = speech.rstrip() # remove trailing whitespace.
# Notify observers if a keyphrase was matched.
result = speech if speech in self._keyphrase_functions else ""
if result:
words = tuple(result.split())
self._recognition_observer_manager.notify_recognition(words)
# Call the registered function if there was a match and the function
# is callable.
func = self._keyphrase_functions.get(speech, None)
if callable(func):
try:
func()
except Exception as e:
self._log.exception(
"Exception caught when executing the function for "
"keyphrase '%s': %s" % (speech, e)
)
return result
@classmethod
def _generate_words_rules(cls, words, mimicking):
# Convert words to Unicode, treat all uppercase words as dictation
# words and other words as grammar words.
# Minor note: this won't work for languages without capitalisation.
result = []
for word in words.split():
if PY2 and isinstance(word, str):
word = text_type(word, encoding="utf-8")
if word.isupper() and mimicking:
# Convert dictation words to lowercase for consistent
# output.
result.append((word.lower(), 1000000))
else:
result.append((word, 0))
return tuple(result)
def _process_hypotheses(self, speech, mimicking):
"""
Internal method to process speech hypotheses. This should only be called
from 'SphinxEngine._hypothesis_callback' because that method does important
post processing.
:param speech: speech
:param mimicking: whether to treat speech as mimicked speech.
:rtype: tuple
"""
# Check key phrases search first.
keyphrase = self._process_key_phrases(speech, mimicking)
if keyphrase:
# Keyphrase search matched.
return True, keyphrase
# Otherwise do grammar processing.
processing_occurred = False
hypotheses = {}
# Collect each active grammar's GrammarWrapper.
wrappers = [w for w in self._grammar_wrappers.values()
if w.grammar_active]
# No grammar has been loaded.
if not wrappers:
# TODO What should we do here? Output formatted Dictation like DNS?
return processing_occurred, speech
# Batch process audio buffers for each active grammar. Store each
# hypothesis.
for wrapper in wrappers:
if mimicking:
# Just use 'speech' for everything if mimicking.
hyp = speech
else:
# Switch to the search for this grammar and re-process the
# audio.
self._set_grammar(wrapper, True)
hyp = self._decoder.batch_process(
self._audio_buffers,
use_callbacks=False
)
if hyp:
hyp = hyp.hypstr
# Set the hypothesis in the dictionary.
hypotheses[wrapper.search_name] = hyp
# Get the best hypothesis.
speech = self._get_best_hypothesis(list(hypotheses.values()))
if not speech:
return processing_occurred, speech
# Process speech using the first matching grammar.
words_rules = self._generate_words_rules(speech, mimicking)
for wrapper in wrappers:
if hypotheses[wrapper.search_name] != speech:
continue
processing_occurred = wrapper.process_words(words_rules)
if processing_occurred:
break
# Return whether processing occurred and the final speech hypothesis for
# post processing.
return processing_occurred, speech
def process_buffer(self, buf):
"""
Recognise speech from an audio buffer.
This method is meant to be called in sequence for multiple audio
buffers. It will do nothing if :meth:`connect` hasn't been called.
:param buf: audio buffer
:type buf: str
"""
if not self._decoder:
return
# Cancel current recognition if it has been requested.
if self._cancel_recognition_next_time:
self._decoder.end_utterance()
self._audio_buffers = []
self._cancel_recognition_next_time = False
# Keep a list of buffers for possible reprocessing using different Pocket
# Sphinx searches later.
self._audio_buffers.append(buf)
# Call the timer callback if it is set.
self._call_timer_callback()
# Process audio.
try:
self._recognising = True
self._decoder.process_audio(buf)
finally:
self._recognising = False
def process_wave_file(self, path):
"""
Recognise speech from a wave file and return the recognition results.
This method checks that the wave file is valid. It raises an error
if the file doesn't exist, if it can't be read or if the WAV header
values do not match those in the engine configuration.
If recognition is paused (sleep mode), this method will call
:meth:`resume_recognition`.
The wave file must use the same sample width, sample rate and number
of channels that the acoustic model uses.
If the file is valid, :meth:`process_buffer` is then used to process
the audio.
Multiple utterances are supported.
:param path: wave file path
:raises: IOError | OSError | ValueError
:returns: recognition results
:rtype: generator
"""
if not self._decoder:
self.connect()
# This method's implementation has been adapted from the PyAudio
# play wave example:
# http://people.csail.mit.edu/hubert/pyaudio/#play-wave-example
# Check that path is a valid file.
if not os.path.isfile(path):
raise IOError("'%s' is not a file. Please use a different file path.")
# Get required audio configuration from the engine config.
channels, sample_width, rate, chunk = (
self.config.CHANNELS,
self.config.SAMPLE_WIDTH,
self.config.RATE,
self.config.FRAMES_PER_BUFFER
)
# Make sure recognition is not paused.
if self.recognition_paused:
self.resume_recognition(notify=False)
# Open the wave file. Use contextlib to make sure that the file is
# closed whether errors are raised or not.
# Also register a custom recognition observer for the duration.
obs = WaveRecognitionObserver(self)
with contextlib.closing(wave.open(path, "rb")) as wf, obs as obs:
# Validate the wave file's header.
if wf.getnchannels() != channels:
message = ("WAV file '%s' should use %d channel(s), not %d!"
% (path, channels, wf.getnchannels()))
elif wf.getsampwidth() != sample_width:
message = ("WAV file '%s' should use sample width %d, not "
"%d!" % (path, sample_width, wf.getsampwidth()))
elif wf.getframerate() != rate:
message = ("WAV file '%s' should use sample rate %d, not "
"%d!" % (path, rate, wf.getframerate()))
else:
message = None
if message:
raise ValueError(message)
# Use process_buffer to process each buffer.
for _ in range(0, int(wf.getnframes() / chunk) + 1):
data = wf.readframes(chunk)
if not data:
break
self.process_buffer(data)
# Get the results from the observer.
if obs.words:
yield obs.words
obs.words = ""
# Log warnings if speech start or end weren't detected.
if not obs.complete:
self._log.warning("Speech start/end wasn't detected in the wave "
"file!")
self._log.warning("Perhaps the Sphinx '-vad_prespeech' value "
"should be higher?")
self._log.warning("Or maybe '-vad_startspeech' or "
"'-vad_postspeech' should be lower?")
def recognise_forever(self):
"""
Start recognising from the default recording device until
:meth:`disconnect` is called.
Recognition can be paused and resumed using either the sleep/wake
key phrases or by calling :meth:`pause_recognition` or
:meth:`resume_recognition`.
To configure audio input settings, modify the engine's ``CHANNELS``,
``RATE``, ``SAMPLE_WIDTH`` and/or ``FRAMES_PER_BUFFER``
configuration options.
"""
if not self._decoder:
self.connect()
# Start recognising in a loop.
self._recorder.start()
self._cancel_recognition_next_time = False
while self.recognising:
for buf in self._recorder.get_buffers():
self.process_buffer(buf)
# Free engine resources after recognition has stopped.
self._free_engine_resources()
def mimic(self, words):
""" Mimic a recognition of the given *words* """
if isinstance(words, (list, tuple)):
words = " ".join(words)
if self.recognition_paused and words == self.config.WAKE_PHRASE:
self.resume_recognition()
return
# Pretend that Sphinx has started processing speech
self._speech_start_callback(True)
# Process the words as if they were spoken
result = self._hypothesis_callback(words, True)
if not result:
raise MimicFailure("No matching rule found for words %s."
% words)
def mimic_phrases(self, *phrases):
"""
Mimic a recognition of the given *phrases*.
This method accepts variable phrases instead of a list of words.
"""
# Pretend that Sphinx has started processing speech
self._speech_start_callback(True)
# Process phrases as if they were spoken
wake_phrase = self.config.WAKE_PHRASE
for phrase in phrases:
if self.recognition_paused and phrase == wake_phrase:
self.resume_recognition()
continue
result = self._hypothesis_callback(phrase, True)
if not result:
raise MimicFailure("No matching rule found for words %s."
% phrase)
def speak(self, text):
""""""
self._log.warning("text-to-speech is not implemented for this "
"engine.")
self._log.warning("Printing text instead.")
print(text)
def _get_language(self):
return self.config.LANGUAGE
# ----------------------------------------------------------------------
# Training-related methods
def write_transcript_files(self, fileids_path, transcription_path):
"""
Write .fileids and .transcription files for files in the training
data directory and write them to the specified file paths.
This method will raise an error if the ``TRAINING_DATA_DIR``
configuration option is not set to an existing directory.
:param fileids_path: path to .fileids file to create.
:param transcription_path: path to .transcription file to create.
:type fileids_path: str
:type transcription_path: str
:raises: IOError | OSError
"""
write_transcript_files(
self.config, fileids_path, transcription_path
)
@property
def training_session_active(self):
"""
Whether a training session is in progress.
:rtype: bool
"""
return self._training_session_active
def start_training_session(self):
"""
Start the training session. This will stop recognition processing
until either :meth:`end_training_session` is called or the end
training keyphrase is heard.
"""
data_dir = self.config.TRAINING_DATA_DIR
if not data_dir or not os.path.isdir(data_dir):
self._log.warning("Training data will not be recorded; '%s' is "
"not a directory" % data_dir)
if not self._training_session_active:
self._log.info("Training session has started. No rule "
"actions will be processed. ")
self._log.info("Say '%s' to end the session."
% self.config.END_TRAINING_PHRASE)
self._training_session_active = True
def end_training_session(self):
"""
End the training if one is in progress. This will allow recognition
processing once again.
"""
if self._training_session_active:
self._log.info("Ending training session.")
self._log.info("Rule actions will now be processed normally "
"again.")
self._training_session_active = False
# ----------------------------------------------------------------------
# Recognition loop control methods
# Stopping recognition loop is done using disconnect()
@property
def recognition_paused(self):
"""
Whether the engine is waiting for the wake phrase to be heard or for
:meth:`resume_recognition` to be called.
:rtype: bool
"""
return self._recognition_paused
def pause_recognition(self):
"""
Pause recognition and wait for :meth:`resume_recognition` to be
called or for the wake keyphrase to be spoken.
"""
if not self._decoder:
return
self._recognition_paused = True
# Switch to the wake keyphrase search if a wake keyphrase has been
# set.
self._set_default_search()
if not self.config.WAKE_PHRASE:
self._log.warning("No wake phrase has been set.")
self._log.warning("Use engine.resume_recognition() to wake up.")
# Define temporary callback for the decoder.
def hypothesis(hyp):
# Clear any recorded audio buffers.
self._recorder.clear_buffers()
s = hyp.hypstr if hyp else None
# Resume recognition if s is the wake keyphrase.
if s and s.strip() == self.config.WAKE_PHRASE.strip():
self.resume_recognition()
elif self.config.WAKE_PHRASE:
self._log.debug("Didn't hear %s" % self.config.WAKE_PHRASE)
# Clear audio buffers
self._audio_buffers = []
# Override decoder hypothesis callback.
self._decoder.hypothesis_callback = hypothesis
def resume_recognition(self, notify=True):
"""
Resume listening for grammar rules and key phrases.
"""
if not self._decoder:
return
self._recognition_paused = False
# Notify observers about recognition resume.
keyphrase = self.config.WAKE_PHRASE
words = tuple(keyphrase.strip().split())
if words and notify:
self._recognition_observer_manager.notify_recognition(words)
# Restore the callbacks to normal
def hypothesis(hyp):
# Set default search result.
self._default_search_result = hyp
# Set speech to the hypothesis string or None if there isn't one
speech = hyp.hypstr if hyp else None
return self._hypothesis_callback(speech, False)
self._decoder.hypothesis_callback = hypothesis
# Switch to the default search.
self._set_default_search()
def cancel_recognition(self):
"""
If a recognition was in progress, cancel it before processing the
next audio buffer.
"""
self._cancel_recognition_next_time = True
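
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). get_engine()
# and Grammar are standard dragonfly entry points; the grammar contents are
# placeholders:
#
#   from dragonfly import get_engine, Grammar
#
#   engine = get_engine("sphinx")          # returns a SphinxEngine instance
#   engine.config.START_ASLEEP = False     # adjust config before connect()
#   engine.connect()
#
#   grammar = Grammar("example")
#   # ... add rules to the grammar and call grammar.load() ...
#
#   engine.recognise_forever()             # blocks until disconnect() is called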
| tylercal/dragonfly | dragonfly/engines/backend_sphinx/engine.py | Python | lgpl-3.0 | 45,112 |
import itertools
from ieml.usl import USL, PolyMorpheme, check_polymorpheme
def check_lexeme(lexeme, sfun=None):
for pm in [lexeme.pm_flexion, lexeme.pm_content]:
if not isinstance(pm, PolyMorpheme):
raise ValueError("Invalid arguments to create a Lexeme, expects a Polymorpheme, not a {}."
.format(pm.__class__.__name__))
check_polymorpheme(pm)
# check_lexeme_scripts(lexeme.pm_flexion.constant,
# lexeme.pm_content.constant,
# sfun=sfun)
class Lexeme(USL):
"""A lexeme without the PA of the position on the tree (position independant lexeme)"""
syntactic_level = 2
def __init__(self, pm_flexion: PolyMorpheme, pm_content: PolyMorpheme):
super().__init__()
if not isinstance(pm_flexion, PolyMorpheme) or not isinstance(pm_content, PolyMorpheme):
raise ValueError("Invalid arguments to creates a lexeme object")
self.pm_flexion = pm_flexion
self.pm_content = pm_content
# self.address = PolyMorpheme(constant=[m for m in pm_address.constant if m in ADDRESS_SCRIPTS])
self.grammatical_class = self.pm_content.grammatical_class
self._str = []
for pm in [self.pm_content, self.pm_flexion]:
if not self._str and pm.empty:
continue
self._str.append("({})".format(str(pm)))
self._str = ''.join(reversed(self._str))
if not self._str:
self._str = "()"
def check(self):
pass
def iter_structure(self):
yield self.pm_flexion
yield from self.pm_flexion.iter_structure()
yield self.pm_content
yield from self.pm_content.iter_structure()
def iter_structure_path(self, flexion=False):
from ieml.usl.decoration.path import LexemePath, LexemeIndex
from ieml.usl.decoration.path import FlexionPath
yield (LexemePath(LexemeIndex.FLEXION), self.pm_flexion)
yield from [(LexemePath(LexemeIndex.FLEXION, child=None if path.morpheme is None else FlexionPath(path.morpheme)), pm)
for path, pm in self.pm_flexion.iter_structure_path()]
yield (LexemePath(LexemeIndex.CONTENT), self.pm_content)
yield from [(LexemePath(LexemeIndex.CONTENT, child=path), pm)
for path, pm in self.pm_content.iter_structure_path()]
@property
def empty(self):
return self.pm_content.empty and self.pm_flexion.empty
def do_lt(self, other):
return self.pm_flexion < other.pm_flexion or \
(self.pm_flexion == other.pm_flexion and self.pm_content < other.pm_content)
def _compute_singular_sequences(self):
if self.pm_flexion.is_singular and (self.pm_content is None or self.pm_content.is_singular):
return [self]
else:
_product = [self.pm_flexion,
self.pm_content]
_product = [p.singular_sequences for p in _product if p is not None]
return [Lexeme(*ss)
for ss in itertools.product(*_product)]
@property
def morphemes(self):
return sorted(set(self.pm_flexion.morphemes + self.pm_content.morphemes))
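
# Illustrative construction (not part of the original module). The PolyMorpheme
# values are placeholders and are assumed to have been built from valid IEML
# scripts elsewhere:
#
#   lexeme = Lexeme(pm_flexion=some_flexion_pm, pm_content=some_content_pm)
#   check_lexeme(lexeme)          # validates both polymorphemes
#   str(lexeme)                   # "(<flexion>)(<content>)", or "()" when empty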
| IEMLdev/propositions-restful-server | ieml/usl/lexeme.py | Python | gpl-3.0 | 2,853 |
import sys
import gc
import numarray as _na
import time
# gc.set_threshold(1)
packages = [
"numarray.numtest",
"numarray.ieeespecial",
"numarray.records",
"numarray.strings",
"numarray.memmap",
"numarray.objects",
"numarray.memorytest",
"numarray.examples.convolve",
"numarray.convolve",
"numarray.fft",
"numarray.linear_algebra",
"numarray.image",
"numarray.nd_image",
"numarray.random_array",
"numarray.ma",
"numarray.matrix"]
def test():
if "gettotalrefcount" in dir(sys):
DEBUG = "debug"
else:
DEBUG = "normal"
print "Testing numarray",_na.__version__,"on",DEBUG,"Python",sys.version_info, "on platform", sys.platform
total_t0 = time.clock()
for p in packages:
exec("import " + p)
t0 = time.clock()
result = eval(p+".test()")
if result is not None:
print ("%-40s%5.2f %s" % (p + ":" , time.clock()-t0, result))
print "%-40s%5.2f" % ("Total time:", time.clock()-total_t0)
if __name__ == "__main__":
test()
| fxia22/ASM_xf | PythonD/site_python/numarray/testall.py | Python | gpl-2.0 | 1,079 |
"""Adds TRACE level logging, which is below DEBUG."""
import logging
import types
TRACE_LEVEL = 5
logging.addLevelName(TRACE_LEVEL, 'TRACE')
def _trace(self, msg, *args, **kwargs):
self.log(TRACE_LEVEL, msg, *args, **kwargs)
def getLogger(name):
logger = logging.getLogger(name)
if not hasattr(logger, 'trace'):
logger.trace = types.MethodType(_trace, logger)
return logger
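
# Illustrative usage (not part of the original module); the import path assumes
# this file lives at kafka_rest/custom_logging.py as in the source repository:
#
#   from kafka_rest.custom_logging import getLogger, TRACE_LEVEL
#
#   logger = getLogger(__name__)
#   logger.setLevel(TRACE_LEVEL)              # allow TRACE records through
#   logger.trace("raw payload: %r", payload)  # logged below DEBUG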
| gamechanger/kafka-rest | kafka_rest/custom_logging.py | Python | mit | 405 |
__author__ = 'bruno'
def mark_component(graph, node, marked):
marked[node] = True
total_marked = 1
for neighbor in graph[node]:
if neighbor not in marked:
total_marked += mark_component(graph, neighbor, marked)
return total_marked
def list_component_number_of_vertices(graph):
marked = {}
components = {}
for node in graph.keys():
if node not in marked:
components[node] = mark_component(graph, node, marked)
return components | bnsantos/python-junk-code | algorithms/graphs/connectedComponents.py | Python | gpl-2.0 | 502 |
L: List[int] = list()
b: bool = bool(L)
# FINAL: L -> _@⊥; b -> [0, 0]; len(L) -> [0, 0]
| caterinaurban/Lyra | src/lyra/unittests/numerical/interval/forward/indexing3/empty.py | Python | mpl-2.0 | 92 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.osv import expression
class FleetVehicle(models.Model):
_inherit = ['mail.thread', 'mail.activity.mixin']
_name = 'fleet.vehicle'
_description = 'Vehicle'
_order = 'license_plate asc, acquisition_date asc'
def _get_default_state(self):
state = self.env.ref('fleet.fleet_vehicle_state_registered', raise_if_not_found=False)
return state if state and state.id else False
name = fields.Char(compute="_compute_vehicle_name", store=True)
description = fields.Text("Vehicle Description")
active = fields.Boolean('Active', default=True, tracking=True)
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.company)
currency_id = fields.Many2one('res.currency', related='company_id.currency_id')
license_plate = fields.Char(tracking=True,
        help='License plate number of the vehicle (i.e. plate number for a car)')
vin_sn = fields.Char('Chassis Number', help='Unique number written on the vehicle motor (VIN/SN number)', copy=False)
driver_id = fields.Many2one('res.partner', 'Driver', tracking=True, help='Driver of the vehicle', copy=False)
future_driver_id = fields.Many2one('res.partner', 'Future Driver', tracking=True, help='Next Driver of the vehicle', copy=False, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
model_id = fields.Many2one('fleet.vehicle.model', 'Model',
tracking=True, required=True, help='Model of the vehicle')
manager_id = fields.Many2one('res.users', compute='_compute_manager_id', domain=lambda self: [('groups_id', 'in', self.env.ref('fleet.fleet_group_manager').id)], store=True, readonly=False)
@api.depends('model_id')
def _compute_manager_id(self):
if self.model_id:
self.manager_id = self.model_id.manager_id
else:
self.manager_id = None
brand_id = fields.Many2one('fleet.vehicle.model.brand', 'Brand', related="model_id.brand_id", store=True, readonly=False)
log_drivers = fields.One2many('fleet.vehicle.assignation.log', 'vehicle_id', string='Assignment Logs')
log_services = fields.One2many('fleet.vehicle.log.services', 'vehicle_id', 'Services Logs')
log_contracts = fields.One2many('fleet.vehicle.log.contract', 'vehicle_id', 'Contracts')
contract_count = fields.Integer(compute="_compute_count_all", string='Contract Count')
service_count = fields.Integer(compute="_compute_count_all", string='Services')
odometer_count = fields.Integer(compute="_compute_count_all", string='Odometer')
history_count = fields.Integer(compute="_compute_count_all", string="Drivers History Count")
next_assignation_date = fields.Date('Assignment Date', help='This is the date at which the car will be available, if not set it means available instantly')
acquisition_date = fields.Date('Immatriculation Date', required=False,
default=fields.Date.today, help='Date when the vehicle has been immatriculated')
first_contract_date = fields.Date(string="First Contract Date", default=fields.Date.today)
color = fields.Char(help='Color of the vehicle')
state_id = fields.Many2one('fleet.vehicle.state', 'State',
default=_get_default_state, group_expand='_read_group_stage_ids',
tracking=True,
help='Current state of the vehicle', ondelete="set null")
location = fields.Char(help='Location of the vehicle (garage, ...)')
seats = fields.Integer('Seats Number', help='Number of seats of the vehicle')
model_year = fields.Char('Model Year', help='Year of the model')
doors = fields.Integer('Doors Number', help='Number of doors of the vehicle', default=5)
tag_ids = fields.Many2many('fleet.vehicle.tag', 'fleet_vehicle_vehicle_tag_rel', 'vehicle_tag_id', 'tag_id', 'Tags', copy=False)
odometer = fields.Float(compute='_get_odometer', inverse='_set_odometer', string='Last Odometer',
help='Odometer measure of the vehicle at the moment of this log')
odometer_unit = fields.Selection([
('kilometers', 'Kilometers'),
('miles', 'Miles')
], 'Odometer Unit', default='kilometers', help='Unit of the odometer ', required=True)
transmission = fields.Selection([('manual', 'Manual'), ('automatic', 'Automatic')], 'Transmission', help='Transmission Used by the vehicle')
fuel_type = fields.Selection([
('gasoline', 'Gasoline'),
('diesel', 'Diesel'),
('lpg', 'LPG'),
('electric', 'Electric'),
('hybrid', 'Hybrid')
], 'Fuel Type', help='Fuel Used by the vehicle')
horsepower = fields.Integer()
horsepower_tax = fields.Float('Horsepower Taxation')
power = fields.Integer('Power', help='Power in kW of the vehicle')
co2 = fields.Float('CO2 Emissions', help='CO2 emissions of the vehicle')
image_128 = fields.Image(related='model_id.image_128', readonly=True)
contract_renewal_due_soon = fields.Boolean(compute='_compute_contract_reminder', search='_search_contract_renewal_due_soon',
string='Has Contracts to renew')
contract_renewal_overdue = fields.Boolean(compute='_compute_contract_reminder', search='_search_get_overdue_contract_reminder',
string='Has Contracts Overdue')
contract_renewal_name = fields.Text(compute='_compute_contract_reminder', string='Name of contract to renew soon')
contract_renewal_total = fields.Text(compute='_compute_contract_reminder', string='Total of contracts due or overdue minus one')
car_value = fields.Float(string="Catalog Value (VAT Incl.)", help='Value of the bought vehicle')
net_car_value = fields.Float(string="Purchase Value", help="Purchase value of the vehicle")
residual_value = fields.Float()
plan_to_change_car = fields.Boolean(related='driver_id.plan_to_change_car', store=True, readonly=False)
vehicle_type = fields.Selection(related='model_id.vehicle_type')
@api.depends('model_id.brand_id.name', 'model_id.name', 'license_plate')
def _compute_vehicle_name(self):
for record in self:
record.name = (record.model_id.brand_id.name or '') + '/' + (record.model_id.name or '') + '/' + (record.license_plate or _('No Plate'))
def _get_odometer(self):
FleetVehicalOdometer = self.env['fleet.vehicle.odometer']
for record in self:
vehicle_odometer = FleetVehicalOdometer.search([('vehicle_id', '=', record.id)], limit=1, order='value desc')
if vehicle_odometer:
record.odometer = vehicle_odometer.value
else:
record.odometer = 0
def _set_odometer(self):
for record in self:
if record.odometer:
date = fields.Date.context_today(record)
data = {'value': record.odometer, 'date': date, 'vehicle_id': record.id}
self.env['fleet.vehicle.odometer'].create(data)
def _compute_count_all(self):
Odometer = self.env['fleet.vehicle.odometer']
LogService = self.env['fleet.vehicle.log.services']
LogContract = self.env['fleet.vehicle.log.contract']
for record in self:
record.odometer_count = Odometer.search_count([('vehicle_id', '=', record.id)])
record.service_count = LogService.search_count([('vehicle_id', '=', record.id)])
record.contract_count = LogContract.search_count([('vehicle_id', '=', record.id), ('state', '!=', 'closed')])
record.history_count = self.env['fleet.vehicle.assignation.log'].search_count([('vehicle_id', '=', record.id)])
@api.depends('log_contracts')
def _compute_contract_reminder(self):
params = self.env['ir.config_parameter'].sudo()
delay_alert_contract = int(params.get_param('hr_fleet.delay_alert_contract', default=30))
for record in self:
overdue = False
due_soon = False
total = 0
name = ''
for element in record.log_contracts:
if element.state in ('open', 'expired') and element.expiration_date:
current_date_str = fields.Date.context_today(record)
due_time_str = element.expiration_date
current_date = fields.Date.from_string(current_date_str)
due_time = fields.Date.from_string(due_time_str)
diff_time = (due_time - current_date).days
if diff_time < 0:
overdue = True
total += 1
if diff_time < delay_alert_contract:
due_soon = True
total += 1
if overdue or due_soon:
log_contract = self.env['fleet.vehicle.log.contract'].search([
('vehicle_id', '=', record.id),
('state', 'in', ('open', 'expired'))
], limit=1, order='expiration_date asc')
if log_contract:
# we display only the name of the oldest overdue/due soon contract
name = log_contract.name
record.contract_renewal_overdue = overdue
record.contract_renewal_due_soon = due_soon
record.contract_renewal_total = total - 1 # we remove 1 from the real total for display purposes
record.contract_renewal_name = name
def _get_analytic_name(self):
# This function is used in fleet_account and is overrided in l10n_be_hr_payroll_fleet
return self.license_plate or _('No plate')
def _search_contract_renewal_due_soon(self, operator, value):
params = self.env['ir.config_parameter'].sudo()
delay_alert_contract = int(params.get_param('hr_fleet.delay_alert_contract', default=30))
res = []
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value is True) or (operator in ('<>', '!=') and value is False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.Date.context_today(self)
datetime_today = fields.Datetime.from_string(today)
limit_date = fields.Datetime.to_string(datetime_today + relativedelta(days=+delay_alert_contract))
res_ids = self.env['fleet.vehicle.log.contract'].search([
('expiration_date', '>', today),
('expiration_date', '<', limit_date),
('state', 'in', ['open', 'expired'])
]).mapped('id')
res.append(('id', search_operator, res_ids))
return res
def _search_get_overdue_contract_reminder(self, operator, value):
res = []
assert operator in ('=', '!=', '<>') and value in (True, False), 'Operation not supported'
if (operator == '=' and value is True) or (operator in ('<>', '!=') and value is False):
search_operator = 'in'
else:
search_operator = 'not in'
today = fields.Date.context_today(self)
res_ids = self.env['fleet.vehicle.log.contract'].search([
('expiration_date', '!=', False),
('expiration_date', '<', today),
('state', 'in', ['open', 'expired'])
]).mapped('id')
res.append(('id', search_operator, res_ids))
return res
@api.model
def create(self, vals):
res = super(FleetVehicle, self).create(vals)
if 'driver_id' in vals and vals['driver_id']:
res.create_driver_history(vals['driver_id'])
if 'future_driver_id' in vals and vals['future_driver_id']:
state_waiting_list = self.env.ref('fleet.fleet_vehicle_state_waiting_list', raise_if_not_found=False)
states = res.mapped('state_id').ids
if not state_waiting_list or state_waiting_list.id not in states:
future_driver = self.env['res.partner'].browse(vals['future_driver_id'])
future_driver.sudo().write({'plan_to_change_car': True})
return res
def write(self, vals):
if 'driver_id' in vals and vals['driver_id']:
driver_id = vals['driver_id']
self.filtered(lambda v: v.driver_id.id != driver_id).create_driver_history(driver_id)
if 'future_driver_id' in vals and vals['future_driver_id']:
state_waiting_list = self.env.ref('fleet.fleet_vehicle_state_waiting_list', raise_if_not_found=False)
states = self.mapped('state_id').ids if 'state_id' not in vals else [vals['state_id']]
if not state_waiting_list or state_waiting_list.id not in states:
future_driver = self.env['res.partner'].browse(vals['future_driver_id'])
future_driver.sudo().write({'plan_to_change_car': True})
res = super(FleetVehicle, self).write(vals)
if 'active' in vals and not vals['active']:
self.mapped('log_contracts').write({'active': False})
return res
def _close_driver_history(self):
self.env['fleet.vehicle.assignation.log'].search([
('vehicle_id', 'in', self.ids),
('driver_id', 'in', self.mapped('driver_id').ids),
('date_end', '=', False)
]).write({'date_end': fields.Date.today()})
def create_driver_history(self, driver_id):
for vehicle in self:
self.env['fleet.vehicle.assignation.log'].create({
'vehicle_id': vehicle.id,
'driver_id': driver_id,
'date_start': fields.Date.today(),
})
def action_accept_driver_change(self):
self._close_driver_history()
# Find all the vehicles for which the driver is the future_driver_id
# remove their driver_id and close their history using current date
vehicles = self.search([('driver_id', 'in', self.mapped('future_driver_id').ids)])
vehicles.write({'driver_id': False})
vehicles._close_driver_history()
for vehicle in self:
vehicle.future_driver_id.sudo().write({'plan_to_change_car': False})
vehicle.driver_id = vehicle.future_driver_id
vehicle.future_driver_id = False
@api.model
def _read_group_stage_ids(self, stages, domain, order):
return self.env['fleet.vehicle.state'].search([], order=order)
@api.model
def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
if 'co2' in fields:
fields.remove('co2')
return super(FleetVehicle, self).read_group(domain, fields, groupby, offset, limit, orderby, lazy)
@api.model
def _name_search(self, name, args=None, operator='ilike', limit=100, name_get_uid=None):
args = args or []
if operator == 'ilike' and not (name or '').strip():
domain = []
else:
domain = ['|', ('name', operator, name), ('driver_id.name', operator, name)]
rec = self._search(expression.AND([domain, args]), limit=limit, access_rights_uid=name_get_uid)
return models.lazy_name_get(self.browse(rec).with_user(name_get_uid))
def return_action_to_open(self):
""" This opens the xml view specified in xml_id for the current vehicle """
self.ensure_one()
xml_id = self.env.context.get('xml_id')
if xml_id:
res = self.env['ir.actions.act_window'].for_xml_id('fleet', xml_id)
res.update(
context=dict(self.env.context, default_vehicle_id=self.id, group_by=False),
domain=[('vehicle_id', '=', self.id)]
)
return res
return False
def act_show_log_cost(self):
""" This opens log view to view and add new log for this vehicle, groupby default to only show effective costs
@return: the costs log view
"""
self.ensure_one()
copy_context = dict(self.env.context)
copy_context.pop('group_by', None)
res = self.env['ir.actions.act_window'].for_xml_id('fleet', 'fleet_vehicle_costs_action')
res.update(
context=dict(copy_context, default_vehicle_id=self.id, search_default_parent_false=True),
domain=[('vehicle_id', '=', self.id)]
)
return res
def _track_subtype(self, init_values):
self.ensure_one()
if 'driver_id' in init_values or 'future_driver_id' in init_values:
return self.env.ref('fleet.mt_fleet_driver_updated')
return super(FleetVehicle, self)._track_subtype(init_values)
def open_assignation_logs(self):
self.ensure_one()
return {
'type': 'ir.actions.act_window',
'name': 'Assignment Logs',
'view_mode': 'tree',
'res_model': 'fleet.vehicle.assignation.log',
'domain': [('vehicle_id', '=', self.id)],
'context': {'default_driver_id': self.driver_id.id, 'default_vehicle_id': self.id}
}
class FleetVehicleOdometer(models.Model):
_name = 'fleet.vehicle.odometer'
_description = 'Odometer log for a vehicle'
_order = 'date desc'
name = fields.Char(compute='_compute_vehicle_log_name', store=True)
date = fields.Date(default=fields.Date.context_today)
value = fields.Float('Odometer Value', group_operator="max")
vehicle_id = fields.Many2one('fleet.vehicle', 'Vehicle', required=True)
unit = fields.Selection(related='vehicle_id.odometer_unit', string="Unit", readonly=True)
driver_id = fields.Many2one(related="vehicle_id.driver_id", string="Driver", readonly=False)
@api.depends('vehicle_id', 'date')
def _compute_vehicle_log_name(self):
for record in self:
name = record.vehicle_id.name
if not name:
name = str(record.date)
elif record.date:
name += ' / ' + str(record.date)
record.name = name
@api.onchange('vehicle_id')
def _onchange_vehicle(self):
if self.vehicle_id:
self.unit = self.vehicle_id.odometer_unit
class FleetVehicleState(models.Model):
_name = 'fleet.vehicle.state'
_order = 'sequence asc'
_description = 'Vehicle Status'
name = fields.Char(required=True, translate=True)
    sequence = fields.Integer(help="Used to order the vehicle states")
_sql_constraints = [('fleet_state_name_unique', 'unique(name)', 'State name already exists')]
class FleetVehicleTag(models.Model):
_name = 'fleet.vehicle.tag'
_description = 'Vehicle Tag'
name = fields.Char('Tag Name', required=True, translate=True)
color = fields.Integer('Color Index')
_sql_constraints = [('name_uniq', 'unique (name)', "Tag name already exists !")]
class FleetServiceType(models.Model):
_name = 'fleet.service.type'
_description = 'Fleet Service Type'
name = fields.Char(required=True, translate=True)
category = fields.Selection([
('contract', 'Contract'),
('service', 'Service')
    ], 'Category', required=True, help='Choose whether the service refers to contracts, vehicle services or both')
class FleetVehicleAssignationLog(models.Model):
_name = "fleet.vehicle.assignation.log"
_description = "Drivers history on a vehicle"
_order = "create_date desc, date_start desc"
vehicle_id = fields.Many2one('fleet.vehicle', string="Vehicle", required=True)
driver_id = fields.Many2one('res.partner', string="Driver", required=True)
date_start = fields.Date(string="Start Date")
date_end = fields.Date(string="End Date")
| ygol/odoo | addons/fleet/models/fleet_vehicle.py | Python | agpl-3.0 | 19,734 |
# Generated by Django 3.2.10 on 2021-12-25 19:08
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True, verbose_name='email address')),
('name', models.CharField(blank=True, default='', max_length=256)),
],
options={
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='BnPElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tlg_element_id', models.PositiveIntegerField()),
('image_url', models.URLField(default='', max_length=512)),
('last_updated', models.DateTimeField(default=django.utils.timezone.now)),
('sold_out', models.BooleanField(default=False)),
('available', models.BooleanField(default=True)),
],
options={
'ordering': ('-last_updated',),
},
),
migrations.CreateModel(
name='BricklinkCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bl_id', models.PositiveIntegerField()),
('name', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='CatalogItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('item_type', models.CharField(choices=[('set', 'Set'), ('part', 'Part'), ('minifig', 'Minifig'), ('gear', 'Gear'), ('book', 'Book'), ('bricklink_order', 'Bricklink order'), ('lugbulk_order', 'Lugbulk order'), ('bulklot', 'Bulk lot')], default='part', max_length=16)),
('name', models.CharField(max_length=256)),
('number', models.CharField(max_length=32)),
('no_inventory', models.BooleanField(default=False)),
('year_released', models.PositiveIntegerField(blank=True, null=True)),
('weight', models.DecimalField(blank=True, decimal_places=4, max_digits=9, null=True)),
('dimensions', models.CharField(blank=True, max_length=64, null=True)),
('bl_id', models.PositiveIntegerField(blank=True, default=0, null=True)),
('brickset_id', models.CharField(blank=True, default='', max_length=32)),
('tlg_number', models.PositiveIntegerField(null=True)),
('tlg_name', models.CharField(blank=True, default='', max_length=256)),
('other_names', models.JSONField(default=list)),
('other_numbers', models.JSONField(default=list)),
],
),
migrations.CreateModel(
name='Colour',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('number', models.PositiveIntegerField()),
('slug', models.SlugField(default='', editable=False, max_length=64)),
('tlg_name', models.CharField(blank=True, default='', max_length=256)),
('tlg_number', models.PositiveIntegerField(blank=True, null=True)),
('ldraw_name', models.CharField(blank=True, default='', max_length=256)),
('ldraw_number', models.PositiveIntegerField(blank=True, null=True)),
('other_names', models.JSONField(default=list)),
],
),
migrations.CreateModel(
name='Element',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('lego_ids', models.JSONField(default=list)),
('colour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='elements', to='brixdb.colour')),
],
),
migrations.CreateModel(
name='Minifig',
fields=[
('catalogitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='brixdb.catalogitem')),
],
options={
'ordering': ('name',),
},
bases=('brixdb.catalogitem',),
),
migrations.CreateModel(
name='Part',
fields=[
('catalogitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='brixdb.catalogitem')),
],
options={
'ordering': ('name',),
},
bases=('brixdb.catalogitem',),
),
migrations.CreateModel(
name='Set',
fields=[
('catalogitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='brixdb.catalogitem')),
],
options={
'ordering': ('number',),
},
bases=('brixdb.catalogitem',),
),
migrations.CreateModel(
name='OwnedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owners', to='brixdb.catalogitem')),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='owned_items', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ItemInventory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1)),
('is_extra', models.BooleanField(default=False)),
('is_counterpart', models.BooleanField(default=False)),
('is_alternate', models.BooleanField(default=False)),
('match_id', models.PositiveIntegerField(blank=True, default=0)),
('element', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='part_of', to='brixdb.element')),
('item', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='part_of', to='brixdb.catalogitem')),
('part_of', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inventory', to='brixdb.catalogitem')),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=256)),
('slug', models.SlugField(default='', max_length=64)),
('bl_id', models.CharField(max_length=64)),
('bl_category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='brixdb.bricklinkcategory')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sub_categories', to='brixdb.category')),
],
),
migrations.AddField(
model_name='catalogitem',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='brixdb.category'),
),
migrations.AddField(
model_name='catalogitem',
name='polymorphic_ctype',
field=models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_brixdb.catalogitem_set+', to='contenttypes.contenttype'),
),
migrations.CreateModel(
name='BnPElementPrices',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_seen', models.DateTimeField(default=django.utils.timezone.now)),
('currency', models.CharField(max_length=3)),
('price', models.DecimalField(decimal_places=2, max_digits=9)),
('element', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='prices', to='brixdb.bnpelement')),
],
),
migrations.AddField(
model_name='bnpelement',
name='element',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bnp_elements', to='brixdb.element'),
),
migrations.CreateModel(
name='LugbulkOrder',
fields=[
('catalogitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='brixdb.catalogitem')),
('period', models.CharField(max_length=8)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lugbulk_orders', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('period', 'owner'),
},
bases=('brixdb.catalogitem',),
),
migrations.AddField(
model_name='element',
name='part',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='elements', to='brixdb.part'),
),
migrations.AlterUniqueTogether(
name='catalogitem',
unique_together={('item_type', 'number')},
),
migrations.CreateModel(
name='BulkLot',
fields=[
('catalogitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='brixdb.catalogitem')),
('acquired', models.DateTimeField(blank=True, null=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bulk_lots', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name', 'owner'),
},
bases=('brixdb.catalogitem',),
),
migrations.CreateModel(
name='BricklinkOrder',
fields=[
('catalogitem_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='brixdb.catalogitem')),
('ordered', models.DateTimeField(blank=True, null=True)),
('seller_username', models.CharField(blank=True, default='', max_length=256)),
('price', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),
('shipping', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),
('fees', models.DecimalField(blank=True, decimal_places=2, max_digits=9, null=True)),
('currency', models.CharField(blank=True, default='', max_length=3)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bricklink_orders', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('number',),
},
bases=('brixdb.catalogitem',),
),
migrations.AlterUniqueTogether(
name='element',
unique_together={('part', 'colour')},
),
]
| jensadne/brixdb | brixdb/migrations/0001_initial.py | Python | mit | 12,872 |
import os
import unittest
class UnitTests(unittest.TestCase):
def setUp(self):
print('SetUp Complete.')
    def test_assert_string(self):
db_path = '../data/db.txt'
self.assertTrue(os.path.isfile(db_path))
data_file = open(db_path,'r')
data_list = data_file.readlines()
row_count = len(data_list)
if row_count > 0:
for line in data_list:
print(line, end='')
data_file.close()
self.assertEqual(data_list[0].replace('\n',''), 'Hello')
@unittest.skip("Only to Test Functionality.")
def test_print(self):
print(os.path.dirname(os.path.abspath(__file__)), '\n')
def tearDown(self):
print('TearDown Complete.')
if __name__ == '__main__':
    unittest.main()
| MAhlers/python | HelloWorld/tests/test_hello_world.py | Python | mit | 737 |
# -*- coding: utf-8 -*-
# Copyright © 2014 Puneeth Chaganti and others.
# See the LICENSE file for license rights and limitations (MIT).
import os
from os.path import abspath, dirname, exists, join
def read_heroku_env_file(name='.env'):
env_path = join(dirname(abspath(__file__)), name)
env = {}
if exists(env_path):
with open(env_path) as f:
for line in f:
key, value = line.split('=')
env[key.strip()] = value.strip()
return env
def get_config_var(name, default=''):
value = os.environ.get(name, alternate_env.get(name, default))
return value
alternate_env = read_heroku_env_file()
# Flask sample config
SQLALCHEMY_DATABASE_URI = get_config_var('DATABASE_URL', 'sqlite:///github.db')
SECRET_KEY = get_config_var('SECRET_KEY', 'top secret!')
CLIENT_ID = get_config_var('CLIENT_ID', 'x'*20)
CLIENT_SECRET = get_config_var('CLIENT_SECRET', 'y'*40)
STATE = get_config_var('STATE', '')
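# Hedged illustration of the lookup order implemented by get_config_var (sample
# values are invented): os.environ wins, then the local .env file, then the
# hard-coded default.
#
#   # .env
#   CLIENT_ID=abc123
#
#   get_config_var('CLIENT_ID', 'x' * 20)   # -> 'abc123' (from .env)
#   get_config_var('STATE', 'fallback')     # -> 'fallback' (not set anywhere)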
| punchagan/statiki | settings.py | Python | mit | 969 |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from ..account import app_settings as account_settings
from ..account.adapter import get_adapter as get_account_adapter
from ..account.views import (
AjaxCapableProcessFormViewMixin,
CloseableSignupMixin,
RedirectAuthenticatedUserMixin,
)
from ..utils import get_form_class
from . import app_settings, helpers
from .adapter import get_adapter
from .forms import DisconnectForm, SignupForm
from .models import SocialAccount, SocialLogin
class SignupView(
RedirectAuthenticatedUserMixin,
CloseableSignupMixin,
AjaxCapableProcessFormViewMixin,
FormView,
):
form_class = SignupForm
template_name = "socialaccount/signup." + account_settings.TEMPLATE_EXTENSION
def get_form_class(self):
return get_form_class(app_settings.FORMS, "signup", self.form_class)
def dispatch(self, request, *args, **kwargs):
self.sociallogin = None
data = request.session.get("socialaccount_sociallogin")
if data:
self.sociallogin = SocialLogin.deserialize(data)
if not self.sociallogin:
return HttpResponseRedirect(reverse("account_login"))
return super(SignupView, self).dispatch(request, *args, **kwargs)
def is_open(self):
return get_adapter(self.request).is_open_for_signup(
self.request, self.sociallogin
)
def get_form_kwargs(self):
ret = super(SignupView, self).get_form_kwargs()
ret["sociallogin"] = self.sociallogin
return ret
def form_valid(self, form):
self.request.session.pop("socialaccount_sociallogin", None)
form.save(self.request)
return helpers.complete_social_signup(self.request, self.sociallogin)
def get_context_data(self, **kwargs):
ret = super(SignupView, self).get_context_data(**kwargs)
ret.update(
dict(
site=get_current_site(self.request),
account=self.sociallogin.account,
)
)
return ret
def get_authenticated_redirect_url(self):
return reverse(connections)
signup = SignupView.as_view()
class LoginCancelledView(TemplateView):
template_name = (
"socialaccount/login_cancelled." + account_settings.TEMPLATE_EXTENSION
)
login_cancelled = LoginCancelledView.as_view()
class LoginErrorView(TemplateView):
template_name = (
"socialaccount/authentication_error." + account_settings.TEMPLATE_EXTENSION
)
login_error = LoginErrorView.as_view()
class ConnectionsView(AjaxCapableProcessFormViewMixin, FormView):
template_name = "socialaccount/connections." + account_settings.TEMPLATE_EXTENSION
form_class = DisconnectForm
success_url = reverse_lazy("socialaccount_connections")
def get_form_class(self):
return get_form_class(app_settings.FORMS, "disconnect", self.form_class)
def get_form_kwargs(self):
kwargs = super(ConnectionsView, self).get_form_kwargs()
kwargs["request"] = self.request
return kwargs
def form_valid(self, form):
get_account_adapter().add_message(
self.request,
messages.INFO,
"socialaccount/messages/account_disconnected.txt",
)
form.save()
return super(ConnectionsView, self).form_valid(form)
def get_ajax_data(self):
account_data = []
for account in SocialAccount.objects.filter(user=self.request.user):
provider_account = account.get_provider_account()
account_data.append(
{
"id": account.pk,
"provider": account.provider,
"name": provider_account.to_str(),
}
)
return {"socialaccounts": account_data}
connections = login_required(ConnectionsView.as_view())
| pennersr/django-allauth | allauth/socialaccount/views.py | Python | mit | 4,170 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class ApplicationGatewayHttpListener(SubResource):
"""Http listener of an application gateway.
:param id: Resource ID.
:type id: str
:param frontend_ip_configuration: Frontend IP configuration resource of an
application gateway.
:type frontend_ip_configuration:
~azure.mgmt.network.v2017_11_01.models.SubResource
:param frontend_port: Frontend port resource of an application gateway.
:type frontend_port: ~azure.mgmt.network.v2017_11_01.models.SubResource
:param protocol: Protocol. Possible values include: 'Http', 'Https'
:type protocol: str or
~azure.mgmt.network.v2017_11_01.models.ApplicationGatewayProtocol
:param host_name: Host name of HTTP listener.
:type host_name: str
:param ssl_certificate: SSL certificate resource of an application
gateway.
:type ssl_certificate: ~azure.mgmt.network.v2017_11_01.models.SubResource
:param require_server_name_indication: Applicable only if protocol is
https. Enables SNI for multi-hosting.
:type require_server_name_indication: bool
:param provisioning_state: Provisioning state of the HTTP listener
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param type: Type of the resource.
:type type: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'SubResource'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'ssl_certificate': {'key': 'properties.sslCertificate', 'type': 'SubResource'},
'require_server_name_indication': {'key': 'properties.requireServerNameIndication', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(self, *, id: str=None, frontend_ip_configuration=None, frontend_port=None, protocol=None, host_name: str=None, ssl_certificate=None, require_server_name_indication: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, type: str=None, **kwargs) -> None:
super(ApplicationGatewayHttpListener, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.frontend_port = frontend_port
self.protocol = protocol
self.host_name = host_name
self.ssl_certificate = ssl_certificate
self.require_server_name_indication = require_server_name_indication
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.type = type
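# Hedged usage sketch (not part of the generated SDK code): building a listener
# that references existing sub-resources by ID. The resource IDs below are
# invented placeholders.
#
#   listener = ApplicationGatewayHttpListener(
#       name='listener1',
#       protocol='Http',
#       frontend_ip_configuration=SubResource(id='.../frontendIPConfigurations/appGwFeIp'),
#       frontend_port=SubResource(id='.../frontendPorts/port_80'),
#   )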
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/application_gateway_http_listener_py3.py | Python | mit | 3,718 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import os
import pipes
import random
import re
import stat
import tempfile
import time
from abc import ABCMeta, abstractmethod
from ansible.compat.six import binary_type, text_type, iteritems, with_metaclass
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes, to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase(with_metaclass(ABCMeta, object)):
'''
This class is the base class for all action plugins, and defines
code common to all actions. The base class handles the connection
by putting/getting files and executing commands based on the current
action in use.
'''
def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
self._task = task
self._connection = connection
self._play_context = play_context
self._loader = loader
self._templar = templar
self._shared_loader_obj = shared_loader_obj
# Backwards compat: self._display isn't really needed, just import the global display and use that.
self._display = display
self._supports_check_mode = True
@abstractmethod
def run(self, tmp=None, task_vars=None):
""" Action Plugins should implement this method to perform their
tasks. Everything else in this base class is a helper method for the
action plugin to do that.
:kwarg tmp: Temporary directory. Sometimes an action plugin sets up
a temporary directory and then calls another module. This parameter
allows us to reuse the same directory for both.
:kwarg task_vars: The variables (host vars, group vars, config vars,
etc) associated with this task.
:returns: dictionary of results from the module
Implementors of action modules may find the following variables especially useful:
* Module parameters. These are stored in self._task.args
"""
# store the module invocation details into the results
results = {}
if self._task.async == 0:
results['invocation'] = dict(
module_name = self._task.action,
module_args = self._task.args,
)
return results
def _configure_module(self, module_name, module_args, task_vars=None):
'''
Handles the loading and templating of the module code through the
modify_module() function.
'''
if task_vars is None:
task_vars = dict()
# Search module path(s) for named module.
for mod_type in self._connection.module_implementation_preferences:
# Check to determine if PowerShell modules are supported, and apply
# some fixes (hacks) to module name + args.
if mod_type == '.ps1':
# win_stat, win_file, and win_copy are not just like their
# python counterparts but they are compatible enough for our
# internal usage
if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
module_name = 'win_%s' % module_name
# Remove extra quotes surrounding path parameters before sending to module.
if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
for key in ('src', 'dest', 'path'):
if key in module_args:
module_args[key] = self._connection._shell._unquote(module_args[key])
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
else: # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
if '.ps1' in self._connection.module_implementation_preferences:
ping_module = 'win_ping'
else:
ping_module = 'ping'
module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, self._connection.module_implementation_preferences)
if module_path2 is not None:
raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
else:
raise AnsibleError("The module %s was not found in configured module paths. "
"Additionally, core modules are missing. If this is a checkout, "
"run 'git submodule update --init --recursive' to correct this problem." % (module_name))
# insert shared code and arguments into the module
(module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars)
return (module_style, module_shebang, module_data)
def _compute_environment_string(self):
'''
Builds the environment string to be used when executing the remote task.
'''
final_environment = dict()
if self._task.environment is not None:
environments = self._task.environment
if not isinstance(environments, list):
environments = [ environments ]
# the environments as inherited need to be reversed, to make
# sure we merge in the parent's values first so those in the
# block then task 'win' in precedence
environments.reverse()
for environment in environments:
if environment is None:
continue
temp_environment = self._templar.template(environment)
if not isinstance(temp_environment, dict):
raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
final_environment.update(temp_environment)
final_environment = self._templar.template(final_environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
'''
Determines if a temp path should be created before the action is executed.
'''
return getattr(self, 'TRANSFERS_FILES', False)
def _late_needs_tmp_path(self, tmp, module_style):
'''
Determines if a temp path is required after some early actions have already taken place.
'''
if tmp and "tmp" in tmp:
# tmp has already been created
return False
if not self._connection.has_pipelining or not self._play_context.pipelining or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become_method == 'su':
# tmp is necessary to store the module source code
# or we want to keep the files on the target system
return True
if module_style != "new":
# even when conn has pipelining, old style modules need tmp to store arguments
return True
return False
def _make_tmp_path(self):
'''
Create and return a temporary path on a remote box.
'''
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
use_system_tmp = False
if self._play_context.become and self._play_context.become_user != 'root':
use_system_tmp = True
tmp_mode = None
if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root':
tmp_mode = 0o755
cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
result = self._low_level_execute_command(cmd, sudoable=False)
# error handling on this seems a little aggressive?
if result['rc'] != 0:
if result['rc'] == 5:
output = 'Authentication failure.'
elif result['rc'] == 255 and self._connection.transport in ('ssh',):
if self._play_context.verbosity > 3:
output = u'SSH encountered an unknown error. The output was:\n%s%s' % (result['stdout'], result['stderr'])
else:
output = (u'SSH encountered an unknown error during the connection.'
' We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue')
elif u'No space left on device' in result['stderr']:
output = result['stderr']
else:
output = ('Authentication or permission failure.'
' In some cases, you may have been able to authenticate and did not have permissions on the remote directory.'
' Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp".'
' Failed command was: %s, exited with result %d' % (cmd, result['rc']))
if 'stdout' in result and result['stdout'] != u'':
output = output + u": %s" % result['stdout']
raise AnsibleConnectionFailure(output)
try:
rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
except IndexError:
# stdout was empty or just space, set to / to trigger error in next if
rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
if rc == '/':
raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))
return rc
def _remove_tmp_path(self, tmp_path):
'''Remove a temporary path we created. '''
if tmp_path and "-tmp-" in tmp_path:
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
self._low_level_execute_command(cmd, sudoable=False)
def _transfer_data(self, remote_path, data):
'''
Copies the module data out to the temporary module path.
'''
if isinstance(data, dict):
data = jsonify(data)
afd, afile = tempfile.mkstemp()
afo = os.fdopen(afd, 'w')
try:
data = to_bytes(data, errors='strict')
afo.write(data)
except Exception as e:
#raise AnsibleError("failure encoding into utf-8: %s" % str(e))
raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))
afo.flush()
afo.close()
try:
self._connection.put_file(afile, remote_path)
finally:
os.unlink(afile)
return remote_path
def _remote_chmod(self, mode, path, sudoable=False):
'''
Issue a remote chmod command
'''
cmd = self._connection._shell.chmod(mode, path)
res = self._low_level_execute_command(cmd, sudoable=sudoable)
return res
def _execute_remote_stat(self, path, all_vars, follow, tmp=None):
'''
Get information from remote file.
'''
module_args=dict(
path=path,
follow=follow,
get_md5=False,
get_checksum=True,
checksum_algo='sha1',
)
mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None))
if 'failed' in mystat and mystat['failed']:
raise AnsibleError('Failed to get information on remote file (%s): %s' % (path, mystat['msg']))
if not mystat['stat']['exists']:
# empty might be matched, 1 should never match, also backwards compatible
mystat['stat']['checksum'] = '1'
# happens sometimes when it is a dir and not on bsd
if not 'checksum' in mystat['stat']:
mystat['stat']['checksum'] = ''
return mystat['stat']
def _remote_checksum(self, path, all_vars):
'''
Produces a remote checksum given a path,
Returns a number 0-4 for specific errors instead of checksum, also ensures it is different
0 = unknown error
1 = file does not exist, this might not be an error
2 = permissions issue
3 = its a directory, not a file
4 = stat module failed, likely due to not finding python
'''
x = "0" # unknown error has occured
try:
remote_stat = self._execute_remote_stat(path, all_vars, follow=False)
if remote_stat['exists'] and remote_stat['isdir']:
x = "3" # its a directory not a file
else:
x = remote_stat['checksum'] # if 1, file is missing
except AnsibleError as e:
errormsg = to_unicode(e)
if errormsg.endswith('Permission denied'):
x = "2" # cannot read file
elif errormsg.endswith('MODULE FAILURE'):
x = "4" # python not found or module uncaught exception
finally:
return x
def _remote_expand_user(self, path):
''' takes a remote path and performs tilde expansion on the remote host '''
if not path.startswith('~'): # FIXME: Windows paths may start with "~ instead of just ~
return path
# FIXME: Can't use os.path.sep for Windows paths.
split_path = path.split(os.path.sep, 1)
expand_path = split_path[0]
if expand_path == '~':
if self._play_context.become and self._play_context.become_user:
expand_path = '~%s' % self._play_context.become_user
cmd = self._connection._shell.expand_user(expand_path)
data = self._low_level_execute_command(cmd, sudoable=False)
#initial_fragment = utils.last_non_blank_line(data['stdout'])
initial_fragment = data['stdout'].strip().splitlines()[-1]
if not initial_fragment:
# Something went wrong trying to expand the path remotely. Return
# the original string
return path
if len(split_path) > 1:
return self._connection._shell.join_path(initial_fragment, *split_path[1:])
else:
return initial_fragment
def _filter_leading_non_json_lines(self, data):
'''
Used to avoid random output from SSH at the top of JSON output, like messages from
tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
        We need to filter out anything that does not start with '{' or '[', as well as empty lines.
        Only leading lines are filtered, since multiline JSON is valid.
'''
idx = 0
for line in data.splitlines(True):
if line.startswith((u'{', u'[')):
break
idx = idx + len(line)
return data[idx:]
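    # Hedged illustration of the filtering above (the noise line is invented):
    #   input:  'Shared connection to host closed.\n{"changed": false}\n'
    #   output: '{"changed": false}\n'
    # Only leading non-JSON lines are dropped; everything from the first line
    # starting with '{' or '[' onwards is returned unchanged.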
def _strip_success_message(self, data):
'''
Removes the BECOME-SUCCESS message from the data.
'''
if data.strip().startswith('BECOME-SUCCESS-'):
data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
return data
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
'''
Transfer and run a module along with its arguments.
'''
if task_vars is None:
task_vars = dict()
# if a module name was not specified for this execution, use
# the action from the task
if module_name is None:
module_name = self._task.action
if module_args is None:
module_args = self._task.args
# set check mode in the module arguments, if required
if self._play_context.check_mode:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
else:
module_args['_ansible_check_mode'] = False
# set no log in the module arguments, if required
module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
# set debug in the module arguments, if required
module_args['_ansible_debug'] = C.DEFAULT_DEBUG
# let module know we are in diff mode
module_args['_ansible_diff'] = self._play_context.diff
# let module know our verbosity
module_args['_ansible_verbosity'] = self._display.verbosity
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
if not shebang:
raise AnsibleError("module (%s) is missing interpreter line" % module_name)
# a remote tmp path may be necessary and not already created
remote_module_path = None
args_file_path = None
if not tmp and self._late_needs_tmp_path(tmp, module_style):
tmp = self._make_tmp_path()
if tmp:
remote_module_filename = self._connection._shell.get_remote_filename(module_name)
remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
if module_style in ['old', 'non_native_want_json']:
# we'll also need a temp file to hold our module arguments
args_file_path = self._connection._shell.join_path(tmp, 'args')
if remote_module_path or module_style != 'new':
display.debug("transferring module to remote")
self._transfer_data(remote_module_path, module_data)
if module_style == 'old':
# we need to dump the module args to a k=v string in a file on
# the remote system, which can be read and parsed by the module
args_data = ""
for k,v in iteritems(module_args):
args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
elif module_style == 'non_native_want_json':
self._transfer_data(args_file_path, json.dumps(module_args))
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
# deal with possible umask issues once sudo'ed to other user
self._remote_chmod('a+r', remote_module_path)
if args_file_path is not None:
self._remote_chmod('a+r', args_file_path)
cmd = ""
in_data = None
if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
in_data = module_data
else:
if remote_module_path:
cmd = remote_module_path
rm_tmp = None
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if not self._play_context.become or self._play_context.become_user == 'root':
# not sudoing or sudoing to root, so can cleanup files in the same step
rm_tmp = tmp
cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, arg_path=args_file_path, rm_tmp=rm_tmp)
cmd = cmd.strip()
sudoable = True
if module_name == "accelerate":
# always run the accelerate module as the user
# specified in the play, not the sudo_user
sudoable = False
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if self._play_context.become and self._play_context.become_user != 'root':
# not sudoing to root, so maybe can't delete files as that other user
# have to clean up temp files as original user in a second step
cmd2 = self._connection._shell.remove(tmp, recurse=True)
self._low_level_execute_command(cmd2, sudoable=False)
try:
data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', u'')))
except ValueError:
# not valid json, lets try to capture error
data = dict(failed=True, parsed=False)
data['msg'] = "MODULE FAILURE"
data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
data['module_stderr'] = res['stderr']
if res['stderr'].startswith(u'Traceback'):
data['exception'] = res['stderr']
# pre-split stdout into lines, if stdout is in the data and there
# isn't already a stdout_lines value there
if 'stdout' in data and 'stdout_lines' not in data:
data['stdout_lines'] = data.get('stdout', u'').splitlines()
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'):
'''
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
run the module code or python directly when pipelining.
:kwarg encoding_errors: If the value returned by the command isn't
utf-8 then we have to figure out how to transform it to unicode.
If the value is just going to be displayed to the user (or
discarded) then the default of 'replace' is fine. If the data is
used as a key or is going to be written back out to a file
verbatim, then this won't work. May have to use some sort of
replacement strategy (python3 could use surrogateescape)
'''
display.debug("_low_level_execute_command(): starting")
if not cmd:
# this can happen with powershell modules when there is no analog to a Windows command (like chmod)
display.debug("_low_level_execute_command(): no command, exiting")
return dict(stdout='', stderr='')
allow_same_user = C.BECOME_ALLOW_SAME_USER
same_user = self._play_context.become_user == self._play_context.remote_user
if sudoable and self._play_context.become and (allow_same_user or not same_user):
display.debug("_low_level_execute_command(): using become for this command")
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
if executable is not None and self._connection.allow_executable:
cmd = executable + ' -c ' + pipes.quote(cmd)
display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
if isinstance(stdout, binary_type):
out = to_unicode(stdout, errors=encoding_errors)
elif not isinstance(stdout, text_type):
out = to_unicode(b''.join(stdout.readlines()), errors=encoding_errors)
else:
out = stdout
if isinstance(stderr, binary_type):
err = to_unicode(stderr, errors=encoding_errors)
elif not isinstance(stderr, text_type):
err = to_unicode(b''.join(stderr.readlines()), errors=encoding_errors)
else:
err = stderr
if rc is None:
rc = 0
# be sure to remove the BECOME-SUCCESS message now
out = self._strip_success_message(out)
display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr))
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
def _get_first_available_file(self, faf, of=None, searchdir='files'):
display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
for fn in faf:
fnt = self._templar.template(fn)
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = fnt
fnd = self._loader.path_dwim_relative(lead, searchdir, fnt)
if not os.path.exists(fnd) and of is not None:
if self._task._role is not None:
lead = self._task._role._role_path
else:
lead = of
fnd = self._loader.path_dwim_relative(lead, searchdir, of)
if os.path.exists(fnd):
return fnd
return None
def _get_diff_data(self, destination, source, task_vars, source_file=True):
diff = {}
display.debug("Going to peek to see if file has changed permissions")
peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)
if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0:
if peek_result['state'] == 'absent':
diff['before'] = ''
elif peek_result['appears_binary']:
diff['dst_binary'] = 1
elif C.MAX_FILE_SIZE_FOR_DIFF > 0 and peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Slurping the file %s" % source)
dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
if 'content' in dest_result:
dest_contents = dest_result['content']
if dest_result['encoding'] == 'base64':
dest_contents = base64.b64decode(dest_contents)
else:
raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
diff['before_header'] = destination
diff['before'] = dest_contents
if source_file:
st = os.stat(source)
if C.MAX_FILE_SIZE_FOR_DIFF > 0 and st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
else:
display.debug("Reading local copy of the file %s" % source)
try:
src = open(source)
src_contents = src.read()
except Exception as e:
raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
if "\x00" in src_contents:
diff['src_binary'] = 1
else:
diff['after_header'] = source
diff['after'] = src_contents
else:
display.debug("source of file passed in")
diff['after_header'] = 'dynamically generated'
diff['after'] = source
if self._play_context.no_log:
if 'before' in diff:
diff["before"] = ""
if 'after' in diff:
diff["after"] = " [[ Diff output has been hidden because 'no_log: true' was specified for this result ]]"
return diff
| xpac1985/ansible | lib/ansible/plugins/action/__init__.py | Python | gpl-3.0 | 29,391 |
import httplib
import itertools
from flask import (
Flask,
abort,
make_response,
render_template,
request,
send_from_directory,
url_for,
)
from werkzeug import secure_filename
import os
from tempfile import mkdtemp
from urlobject import URLObject
from builder import build_docs, unzip_docs
from raven.contrib.flask import Sentry
from sentry_dsn import SENTRY_DSN
from rq_dashboard import RQDashboard
from rq_queues import default_queue, retry_queue, failed_queue
# Tell RQ what Redis connection to use
app = Flask(__name__)
app.config["DOCS_ROOT"] = "/opt/devdocs/docs"
app.config["DEBUG"] = True
RQDashboard(app)
sentry = Sentry(app, dsn=SENTRY_DSN)
if not os.path.exists(app.config["DOCS_ROOT"]):
os.makedirs(app.config["DOCS_ROOT"])
@app.route("/")
def index():
return render_template("index.html", projects=get_projects(), queue=get_queue())
@app.route("/build", methods=["POST"])
def build():
default_queue.enqueue_call(
build_docs,
args=(request.values["url"], app.config["DOCS_ROOT"], request.values.get("pypi_url", None)))
return "Queued"
@app.route("/upload/<package_name>/<version_name>", methods=["POST"])
def upload(package_name, version_name):
if len(request.files) != 1:
return make_response(("File upload requires one file", httplib.BAD_REQUEST, {}))
[(_, uploaded_file)] = request.files.items()
filename = secure_filename(uploaded_file.filename)
directory = mkdtemp()
local_filename = os.path.join(directory, filename)
uploaded_file.save(local_filename)
default_queue.enqueue_call(
unzip_docs, args=(local_filename, app.config["DOCS_ROOT"], package_name, version_name)
)
return "Queued"
@app.route("/dash/<package_name>.xml")
def generate_docset_xml(package_name):
version_filename = os.path.join(app.config["DOCS_ROOT"], package_name, "metadata", "version")
if not os.path.isfile(version_filename):
abort(httplib.NOT_FOUND)
with open(version_filename) as version_file:
version = version_file.read().strip()
docset_url = URLObject(request.base_url).\
with_path(url_for("get_docset", package_name=package_name, filename=package_name + ".tgz"))
return """<entry>
<version>{version}</version>
<url>{url}</url>
</entry>""".format(version=version, url=docset_url)
@app.route("/dash/<package_name>/<path:filename>")
def get_docset(package_name, filename):
return send_from_directory(os.path.join(app.config["DOCS_ROOT"], package_name, "dash"), filename)
@app.route("/sphinx/<package_name>/")
@app.route("/sphinx/<package_name>/<path:filename>")
def serve_sphinx(package_name, filename="index.html"):
return send_from_directory(
os.path.join(app.config["DOCS_ROOT"], package_name, "sphinx", "html"),
filename
)
def get_projects():
for project_name in sorted(os.listdir(app.config["DOCS_ROOT"]), key=str.lower):
project_root = os.path.join(app.config["DOCS_ROOT"], project_name)
project = {}
for attr in ["package_name", "version"]:
with open(os.path.join(project_root, "metadata", attr)) as attr_file:
project[attr] = attr_file.read().strip()
project["has_dash"] = os.path.isdir(os.path.join(project_root, "dash"))
yield project
def get_queue():
return itertools.chain.from_iterable([
(itertools.izip(itertools.repeat(status), itertools.repeat(classes), queue.get_jobs()))
for status, classes, queue in [("Pending", ["pending"], default_queue),
("Pending Retry", ["retry"], retry_queue),
("Failed", ["failed"], failed_queue)]])
if __name__ == "__main__":
app.run(debug=True, port=8080)
| vmalloc/devdocs | webapp/flask_app.py | Python | bsd-3-clause | 3,785 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-10 17:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='OrderSoa',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('client_name', models.CharField(max_length=254, verbose_name='Client Name')),
('attachment_file', models.FileField(upload_to='oder_soa')),
('is_submit', models.BooleanField(default=False, verbose_name='Is submit?')),
('is_complete', models.BooleanField(default=False, verbose_name='Is completed?')),
],
options={
'verbose_name': 'Order SOA',
'verbose_name_plural': 'Order SOA',
},
),
]
| nikkomidoy/project_soa | soamgr/migrations/0001_initial.py | Python | mit | 1,335 |
from gui.resources import *
class Button(ttk.Button):
"""
Extends ttk.Button so arrow keys can be used to traverse
between buttons
"""
def __init__(self, *args, **kwargs):
ttk.Button.__init__(self, *args, **kwargs)
self.bind("<Return>", self.on_press)
self.bind("<Left>", self.traverse)
self.bind("<Right>", self.traverse)
def on_press(self, *event):
"""Enables an event (e.g. key press) to invoke the button"""
self.invoke()
def traverse(self, event):
"""
Traverses buttons in the direction of the pressed arrow key.
"""
widget = event.widget
if event.keysym == "Left":
# find the previous widget
tcl_obj = self.tk.call('tk_focusPrev', widget._w)
prev_widget = self.nametowidget(tcl_obj.string)
if isinstance(prev_widget, ttk.Button):
prev_widget.focus()
if event.keysym == "Right":
# find the next widget
tcl_obj = self.tk.call('tk_focusNext', widget._w)
next_widget = self.nametowidget(tcl_obj.string)
if isinstance(next_widget, ttk.Button):
next_widget.focus()
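# Hedged usage sketch (not part of the original module): a tiny standalone demo
# of arrow-key traversal between two Buttons. The window title and button labels
# are invented for illustration.
if __name__ == "__main__":
    import tkinter as tk

    root = tk.Tk()
    root.title("Button traversal demo")
    left = Button(root, text="Left")
    right = Button(root, text="Right")
    left.grid(row=0, column=0, padx=5, pady=5)
    right.grid(row=0, column=1, padx=5, pady=5)
    left.focus()
    root.mainloop()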
| LincolnPuzey/ZirconsRock | zircons_rock/gui/widgets/button.py | Python | gpl-3.0 | 1,227 |
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains helper functions used to create protocol message classes from
Descriptor objects at runtime backed by the protocol buffer C++ API.
"""
from __future__ import unicode_literals
__author__ = '[email protected] (Petar Petrov)'
import operator
from google.protobuf.internal import _net_proto2___python
from google.protobuf.internal.utils import cmp
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import message
from google.internal.utils import bytestr_to_string
import sys
if sys.version > '3':
import collections
import copyreg
def is_sequence(other):
return isinstance(other, collections.Sequence)
def copy_reg_pickle(type, function):
return copyreg.pickle(type,function)
else:
import copy_reg
def is_sequence(other):
return operator.isSequenceType(other)
def copy_reg_pickle(type, function):
return copy_reg.pickle(type,function)
_LABEL_REPEATED = _net_proto2___python.LABEL_REPEATED
_LABEL_OPTIONAL = _net_proto2___python.LABEL_OPTIONAL
_CPPTYPE_MESSAGE = _net_proto2___python.CPPTYPE_MESSAGE
_TYPE_MESSAGE = _net_proto2___python.TYPE_MESSAGE
def GetDescriptorPool():
"""Creates a new DescriptorPool C++ object."""
return _net_proto2___python.NewCDescriptorPool()
_pool = GetDescriptorPool()
def GetFieldDescriptor(full_field_name):
"""Searches for a field descriptor given a full field name."""
return _pool.FindFieldByName(full_field_name)
def BuildFile(content):
"""Registers a new proto file in the underlying C++ descriptor pool."""
_net_proto2___python.BuildFile(content)
def GetExtensionDescriptor(full_extension_name):
"""Searches for extension descriptor given a full field name."""
return _pool.FindExtensionByName(full_extension_name)
def NewCMessage(full_message_name):
"""Creates a new C++ protocol message by its name."""
return _net_proto2___python.NewCMessage(full_message_name)
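# Hedged illustration of how the thin wrappers above fit together when the C++
# backend is active (names are invented; BuildFile expects a serialized
# FileDescriptorProto):
#
#   BuildFile(serialized_file_descriptor_proto)
#   field = GetFieldDescriptor('my.package.MyMessage.my_field')
#   cmsg = NewCMessage('my.package.MyMessage')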
def ScalarProperty(cdescriptor):
"""Returns a scalar property for the given descriptor."""
def Getter(self):
return self._cmsg.GetScalar(cdescriptor)
def Setter(self, value):
self._cmsg.SetScalar(cdescriptor, value)
return property(Getter, Setter)
def CompositeProperty(cdescriptor, message_type):
"""Returns a Python property the given composite field."""
def Getter(self):
sub_message = self._composite_fields.get(cdescriptor.name, None)
if sub_message is None:
cmessage = self._cmsg.NewSubMessage(cdescriptor)
sub_message = message_type._concrete_class(__cmessage=cmessage)
self._composite_fields[cdescriptor.name] = sub_message
return sub_message
return property(Getter)
class RepeatedScalarContainer(object):
"""Container for repeated scalar fields."""
__slots__ = ['_message', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor):
self._message = msg
self._cmsg = msg._cmsg
self._cfield_descriptor = cfield_descriptor
def append(self, value):
self._cmsg.AddRepeatedScalar(
self._cfield_descriptor, value)
def extend(self, sequence):
for element in sequence:
self.append(element)
def insert(self, key, value):
values = self[slice(None, None, None)]
values.insert(key, value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def remove(self, value):
values = self[slice(None, None, None)]
values.remove(value)
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __setitem__(self, key, value):
values = self[slice(None, None, None)]
values[key] = value
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor, values)
def __getitem__(self, key):
return self._cmsg.GetRepeatedScalar(self._cfield_descriptor, key)
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(self._cfield_descriptor, key)
def __len__(self):
return len(self[slice(None, None, None)])
def __eq__(self, other):
if self is other:
return True
if not is_sequence(other):
raise TypeError(
'Can only compare repeated scalar fields against sequences.')
# We are presumably comparing against some other sequence type.
return other == self[slice(None, None, None)]
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, *args, **kwargs):
# Maintain compatibility with the previous interface.
if 'sort_function' in kwargs:
kwargs['cmp'] = kwargs.pop('sort_function')
self._cmsg.AssignRepeatedScalar(self._cfield_descriptor,
sorted(self, *args, **kwargs))
def RepeatedScalarProperty(cdescriptor):
"""Returns a Python property the given repeated scalar field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedScalarContainer(self, cdescriptor)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class RepeatedCompositeContainer(object):
"""Container for repeated composite fields."""
__slots__ = ['_message', '_subclass', '_cfield_descriptor', '_cmsg']
def __init__(self, msg, cfield_descriptor, subclass):
self._message = msg
self._cmsg = msg._cmsg
self._subclass = subclass
self._cfield_descriptor = cfield_descriptor
def add(self, **kwargs):
cmessage = self._cmsg.AddMessage(self._cfield_descriptor)
return self._subclass(__cmessage=cmessage, __owner=self._message, **kwargs)
def extend(self, elem_seq):
"""Extends by appending the given sequence of elements of the same type
as this one, copying each individual message.
"""
for message in elem_seq:
self.add().MergeFrom(message)
def remove(self, value):
# TODO(protocol-devel): This is inefficient as it needs to generate a
# message pointer for each message only to do index(). Move this to a C++
# extension function.
self.__delitem__(self[slice(None, None, None)].index(value))
def MergeFrom(self, other):
for message in other[:]:
self.add().MergeFrom(message)
def __getitem__(self, key):
cmessages = self._cmsg.GetRepeatedMessage(
self._cfield_descriptor, key)
subclass = self._subclass
if not isinstance(cmessages, list):
return subclass(__cmessage=cmessages, __owner=self._message)
return [subclass(__cmessage=m, __owner=self._message) for m in cmessages]
def __delitem__(self, key):
self._cmsg.DeleteRepeatedField(
self._cfield_descriptor, key)
def __len__(self):
return self._cmsg.FieldLength(self._cfield_descriptor)
def __eq__(self, other):
"""Compares the current instance with another one."""
if self is other:
return True
if not isinstance(other, self.__class__):
raise TypeError('Can only compare repeated composite fields against '
'other repeated composite fields.')
messages = self[slice(None, None, None)]
other_messages = other[slice(None, None, None)]
return messages == other_messages
def __hash__(self):
raise TypeError('unhashable object')
def sort(self, cmp=None, key=None, reverse=False, **kwargs):
# Maintain compatibility with the old interface.
if cmp is None and 'sort_function' in kwargs:
cmp = kwargs.pop('sort_function')
# The cmp function, if provided, is passed the results of the key function,
# so we only need to wrap one of them.
if key is None:
index_key = self.__getitem__
else:
index_key = lambda i: key(self[i])
# Sort the list of current indexes by the underlying object.
indexes = range(len(self))
indexes.sort(cmp=cmp, key=index_key, reverse=reverse)
# Apply the transposition.
for dest, src in enumerate(indexes):
if dest == src:
continue
self._cmsg.SwapRepeatedFieldElements(self._cfield_descriptor, dest, src)
# Don't swap the same value twice.
indexes[src] = src
def RepeatedCompositeProperty(cdescriptor, message_type):
"""Returns a Python property for the given repeated composite field."""
def Getter(self):
container = self._composite_fields.get(cdescriptor.name, None)
if container is None:
container = RepeatedCompositeContainer(
self, cdescriptor, message_type._concrete_class)
self._composite_fields[cdescriptor.name] = container
return container
def Setter(self, new_value):
raise AttributeError('Assignment not allowed to repeated field '
'"%s" in protocol message object.' % cdescriptor.name)
doc = 'Magic attribute generated for "%s" proto field.' % cdescriptor.name
return property(Getter, Setter, doc=doc)
class ExtensionDict(object):
"""Extension dictionary added to each protocol message."""
def __init__(self, msg):
self._message = msg
self._cmsg = msg._cmsg
self._values = {}
def __setitem__(self, extension, value):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_OPTIONAL or
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
raise TypeError('Extension %r is repeated and/or a composite type.' % (
extension.full_name,))
self._cmsg.SetScalar(cdescriptor, value)
self._values[extension] = value
def __getitem__(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type != _CPPTYPE_MESSAGE):
return self._cmsg.GetScalar(cdescriptor)
ext = self._values.get(extension, None)
if ext is not None:
return ext
ext = self._CreateNewHandle(extension)
self._values[extension] = ext
return ext
def ClearExtension(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
self._cmsg.ClearFieldByDescriptor(extension._cdescriptor)
if extension in self._values:
del self._values[extension]
def HasExtension(self, extension):
from google.protobuf import descriptor
if not isinstance(extension, descriptor.FieldDescriptor):
raise KeyError('Bad extension %r.' % (extension,))
return self._cmsg.HasFieldByDescriptor(extension._cdescriptor)
def _FindExtensionByName(self, name):
"""Tries to find a known extension with the specified name.
Args:
name: Extension full name.
Returns:
Extension field descriptor.
"""
return self._message._extensions_by_name.get(name, None)
def _CreateNewHandle(self, extension):
cdescriptor = extension._cdescriptor
if (cdescriptor.label != _LABEL_REPEATED and
cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
cmessage = self._cmsg.NewSubMessage(cdescriptor)
return extension.message_type._concrete_class(__cmessage=cmessage)
if cdescriptor.label == _LABEL_REPEATED:
if cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
return RepeatedCompositeContainer(
self._message, cdescriptor, extension.message_type._concrete_class)
else:
return RepeatedScalarContainer(self._message, cdescriptor)
# This shouldn't happen!
assert False
return None
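# Illustrative sketch (message and extension names are hypothetical): generated message
# classes expose an instance of this dictionary as msg.Extensions, so extension values
# are read and written by indexing with the extension's field descriptor, e.g.
#     msg.Extensions[my_pb2.my_extension] = 42
#     value = msg.Extensions[my_pb2.my_extension]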
def NewMessage(bases, message_descriptor, dictionary):
"""Creates a new protocol message *class*."""
_AddClassAttributesForNestedExtensions(message_descriptor, dictionary)
_AddEnumValues(message_descriptor, dictionary)
_AddDescriptors(message_descriptor, dictionary)
return bases
def InitMessage(message_descriptor, cls):
"""Constructs a new message instance (called before instance's __init__)."""
cls._extensions_by_name = {}
_AddInitMethod(message_descriptor, cls)
_AddMessageMethods(message_descriptor, cls)
_AddPropertiesForExtensions(message_descriptor, cls)
copy_reg_pickle(cls, lambda obj: (cls, (), obj.__getstate__()))
def _AddDescriptors(message_descriptor, dictionary):
"""Sets up a new protocol message class dictionary.
Args:
message_descriptor: A Descriptor instance describing this message type.
dictionary: Class dictionary to which we'll add a '__slots__' entry.
"""
dictionary['__descriptors'] = {}
for field in message_descriptor.fields:
dictionary['__descriptors'][field.name] = GetFieldDescriptor(
field.full_name)
  # __slots__ must be a sequence of attribute names; use the descriptor dict's
  # keys rather than its (key, value) pairs.
  dictionary['__slots__'] = list(dictionary['__descriptors']) + [
      '_cmsg', '_owner', '_composite_fields', 'Extensions', '_HACK_REFCOUNTS']
def _AddEnumValues(message_descriptor, dictionary):
"""Sets class-level attributes for all enum fields defined in this message.
Args:
message_descriptor: Descriptor object for this message type.
dictionary: Class dictionary that should be populated.
"""
for enum_type in message_descriptor.enum_types:
dictionary[enum_type.name] = enum_type_wrapper.EnumTypeWrapper(enum_type)
for enum_value in enum_type.values:
dictionary[enum_value.name] = enum_value.number
def _AddClassAttributesForNestedExtensions(message_descriptor, dictionary):
"""Adds class attributes for the nested extensions."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in iteritems(extension_dict):
assert extension_name not in dictionary
dictionary[extension_name] = extension_field
def _AddInitMethod(message_descriptor, cls):
"""Adds an __init__ method to cls."""
# Create and attach message field properties to the message class.
# This can be done just once per message class, since property setters and
# getters are passed the message instance.
# This makes message instantiation extremely fast, and at the same time it
# doesn't require the creation of property objects for each message instance,
# which saves a lot of memory.
for field in message_descriptor.fields:
field_cdescriptor = cls.__descriptors[field.name]
if field.label == _LABEL_REPEATED:
if field.cpp_type == _CPPTYPE_MESSAGE:
value = RepeatedCompositeProperty(field_cdescriptor, field.message_type)
else:
value = RepeatedScalarProperty(field_cdescriptor)
elif field.cpp_type == _CPPTYPE_MESSAGE:
value = CompositeProperty(field_cdescriptor, field.message_type)
else:
value = ScalarProperty(field_cdescriptor)
setattr(cls, field.name, value)
# Attach a constant with the field number.
constant_name = field.name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, field.number)
def Init(self, **kwargs):
"""Message constructor."""
cmessage = kwargs.pop('__cmessage', None)
if cmessage:
self._cmsg = cmessage
else:
self._cmsg = NewCMessage(message_descriptor.full_name)
# Keep a reference to the owner, as the owner keeps a reference to the
# underlying protocol buffer message.
owner = kwargs.pop('__owner', None)
if owner:
self._owner = owner
if message_descriptor.is_extendable:
self.Extensions = ExtensionDict(self)
else:
# Reference counting in the C++ code is broken and depends on
# the Extensions reference to keep this object alive during unit
# tests (see b/4856052). Remove this once b/4945904 is fixed.
self._HACK_REFCOUNTS = self
self._composite_fields = {}
for field_name, field_value in iteritems(kwargs):
field_cdescriptor = self.__descriptors.get(field_name, None)
if not field_cdescriptor:
raise ValueError('Protocol message has no "%s" field.' % field_name)
if field_cdescriptor.label == _LABEL_REPEATED:
if field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
field_name = getattr(self, field_name)
for val in field_value:
field_name.add().MergeFrom(val)
else:
getattr(self, field_name).extend(field_value)
elif field_cdescriptor.cpp_type == _CPPTYPE_MESSAGE:
getattr(self, field_name).MergeFrom(field_value)
else:
setattr(self, field_name, field_value)
Init.__module__ = None
Init.__doc__ = None
cls.__init__ = Init
def _IsMessageSetExtension(field):
"""Checks if a field is a message set extension."""
return (field.is_extension and
field.containing_type.has_options and
field.containing_type.GetOptions().message_set_wire_format and
field.type == _TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == _LABEL_OPTIONAL)
def _AddMessageMethods(message_descriptor, cls):
"""Adds the methods to a protocol message class."""
if message_descriptor.is_extendable:
def ClearExtension(self, extension):
self.Extensions.ClearExtension(extension)
def HasExtension(self, extension):
return self.Extensions.HasExtension(extension)
def HasField(self, field_name):
return self._cmsg.HasField(field_name)
def ClearField(self, field_name):
child_cmessage = None
if field_name in self._composite_fields:
child_field = self._composite_fields[field_name]
del self._composite_fields[field_name]
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
child_cmessage = child_field._cmsg
if child_cmessage is not None:
self._cmsg.ClearField(field_name, child_cmessage)
else:
self._cmsg.ClearField(field_name)
def Clear(self):
cmessages_to_release = []
for field_name, child_field in self._composite_fields.iteritems():
child_cdescriptor = self.__descriptors[field_name]
# TODO(anuraag): Support clearing repeated message fields as well.
if (child_cdescriptor.label != _LABEL_REPEATED and
child_cdescriptor.cpp_type == _CPPTYPE_MESSAGE):
child_field._owner = None
cmessages_to_release.append((child_cdescriptor, child_field._cmsg))
self._composite_fields.clear()
self._cmsg.Clear(cmessages_to_release)
def IsInitialized(self, errors=None):
if self._cmsg.IsInitialized():
return True
if errors is not None:
      errors.extend(self.FindInitializationErrors())
return False
def SerializeToString(self):
if not self.IsInitialized():
raise message.EncodeError(
'Message %s is missing required fields: %s' % (
self._cmsg.full_name, ','.join(self.FindInitializationErrors())))
return self._cmsg.SerializeToString()
def SerializePartialToString(self):
return self._cmsg.SerializePartialToString()
def ParseFromString(self, serialized):
self.Clear()
self.MergeFromString(serialized)
def MergeFromString(self, serialized):
byte_size = self._cmsg.MergeFromString(serialized)
if byte_size < 0:
raise message.DecodeError('Unable to merge from string.')
return byte_size
def MergeFrom(self, msg):
if not isinstance(msg, cls):
raise TypeError(
"Parameter to MergeFrom() must be instance of same class: "
"expected %s got %s." % (cls.__name__, type(msg).__name__))
self._cmsg.MergeFrom(msg._cmsg)
def CopyFrom(self, msg):
self._cmsg.CopyFrom(msg._cmsg)
def ByteSize(self):
return self._cmsg.ByteSize()
def SetInParent(self):
return self._cmsg.SetInParent()
def ListFields(self):
all_fields = []
field_list = self._cmsg.ListFields()
fields_by_name = cls.DESCRIPTOR.fields_by_name
for is_extension, field_name in field_list:
if is_extension:
extension = cls._extensions_by_name[field_name]
all_fields.append((extension, self.Extensions[extension]))
else:
field_descriptor = fields_by_name[field_name]
all_fields.append(
(field_descriptor, getattr(self, field_name)))
all_fields.sort(key=lambda item: item[0].number)
return all_fields
def FindInitializationErrors(self):
return self._cmsg.FindInitializationErrors()
def __str__(self):
return self._cmsg.DebugString()
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, self.__class__):
return False
return self.ListFields() == other.ListFields()
def __ne__(self, other):
return not self == other
def __hash__(self):
raise TypeError('unhashable object')
def __unicode__(self):
# Lazy import to prevent circular import when text_format imports this file.
from google.protobuf import text_format
return bytestr_to_string(text_format.MessageToString(self, as_utf8=True))
# Attach the local methods to the message class.
for key, value in iteritems(locals().copy()):
if key not in ('key', 'value', '__builtins__', '__name__', '__doc__'):
setattr(cls, key, value)
# Static methods:
def RegisterExtension(extension_handle):
extension_handle.containing_type = cls.DESCRIPTOR
cls._extensions_by_name[extension_handle.full_name] = extension_handle
if _IsMessageSetExtension(extension_handle):
# MessageSet extension. Also register under type name.
cls._extensions_by_name[
extension_handle.message_type.full_name] = extension_handle
cls.RegisterExtension = staticmethod(RegisterExtension)
def FromString(string):
msg = cls()
msg.MergeFromString(string)
return msg
cls.FromString = staticmethod(FromString)
def _AddPropertiesForExtensions(message_descriptor, cls):
"""Adds properties for all fields in this protocol message type."""
extension_dict = message_descriptor.extensions_by_name
for extension_name, extension_field in iteritems(extension_dict):
constant_name = extension_name.upper() + '_FIELD_NUMBER'
setattr(cls, constant_name, extension_field.number)
| ASMlover/study | python/proto/google/protobuf/internal/cpp_message.py | Python | bsd-2-clause | 24,063 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Feed.title'
db.alter_column('feedmanager_feed', 'title', self.gf('django.db.models.fields.TextField')())
def backwards(self, orm):
# Changing field 'Feed.title'
db.alter_column('feedmanager_feed', 'title', self.gf('django.db.models.fields.CharField')(max_length=70))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feedmanager.feed': {
'Meta': {'object_name': 'Feed'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.TextField', [], {}),
'url': ('django.db.models.fields.TextField', [], {}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
},
'feedmanager.item': {
'Meta': {'object_name': 'Item'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedmanager.Feed']"}),
'guid': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {}),
'pubdate': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '70'})
}
}
complete_apps = ['feedmanager']
| jacobjbollinger/sorbet | sorbet/feedmanager/migrations/0006_chg_field_feed_title.py | Python | bsd-2-clause | 5,274 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class StorageMigrationResponse(Resource):
"""Response for a migration of app content request.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:param name: Resource Name.
:type name: str
:param kind: Kind of resource.
:type kind: str
:param location: Resource Location.
:type location: str
:param type: Resource type.
:type type: str
:param tags: Resource tags.
:type tags: dict
:ivar operation_id: When server starts the migration process, it will
return an operation ID identifying that particular migration operation.
:vartype operation_id: str
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
'operation_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'operation_id': {'key': 'properties.operationId', 'type': 'str'},
}
def __init__(self, location, name=None, kind=None, type=None, tags=None):
super(StorageMigrationResponse, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
self.operation_id = None
| v-iam/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/storage_migration_response.py | Python | mit | 1,990 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
TauDEMUtils.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import object
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import subprocess
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import isMac
class TauDEMUtils(object):
TAUDEM_FOLDER = 'TAUDEM_FOLDER'
TAUDEM_MULTIFILE_FOLDER = 'TAUDEM_MULTIFILE_FOLDER'
TAUDEM_USE_SINGLEFILE = 'TAUDEM_USE_SINGLEFILE'
TAUDEM_USE_MULTIFILE = 'TAUDEM_USE_MULTIFILE'
MPIEXEC_FOLDER = 'MPIEXEC_FOLDER'
MPI_PROCESSES = 'MPI_PROCESSES'
@staticmethod
def taudemPath():
folder = ProcessingConfig.getSetting(TauDEMUtils.TAUDEM_FOLDER)
if folder is None:
folder = ''
if isMac():
testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
if os.path.exists(os.path.join(testfolder, 'pitremove')):
folder = testfolder
else:
testfolder = '/usr/local/bin'
if os.path.exists(os.path.join(testfolder, 'pitremove')):
folder = testfolder
return folder
@staticmethod
def mpiexecPath():
folder = ProcessingConfig.getSetting(TauDEMUtils.MPIEXEC_FOLDER)
if folder is None:
folder = ''
if isMac():
testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
if os.path.exists(os.path.join(testfolder, 'mpiexec')):
folder = testfolder
else:
testfolder = '/usr/local/bin'
if os.path.exists(os.path.join(testfolder, 'mpiexec')):
folder = testfolder
return folder
@staticmethod
def taudemDescriptionPath():
return os.path.normpath(
os.path.join(os.path.dirname(__file__), 'description'))
@staticmethod
def executeTauDEM(command, progress):
loglines = []
loglines.append(TauDEMUtils.tr('TauDEM execution console output'))
fused_command = ''.join(['"%s" ' % c for c in command])
progress.setInfo(TauDEMUtils.tr('TauDEM command:'))
progress.setCommand(fused_command.replace('" "', ' ').strip('"'))
proc = subprocess.Popen(
fused_command,
shell=True,
stdout=subprocess.PIPE,
stdin=open(os.devnull),
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
for line in iter(proc.readline, ''):
progress.setConsoleInfo(line)
loglines.append(line)
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
@staticmethod
def tr(string, context=''):
if context == '':
context = 'TauDEMUtils'
return QCoreApplication.translate(context, string)
| wonder-sk/QGIS | python/plugins/processing/algs/taudem/TauDEMUtils.py | Python | gpl-2.0 | 3,960 |
import foohid
import struct
import random
import time
# USB HID report descriptor for a simple gamepad: 16 one-bit buttons followed by
# four signed 8-bit axes (X, Y, Z, Rx). The resulting 6-byte report layout matches
# the struct.pack('H4B', ...) call in the send loop below.
joypad = (
    0x05, 0x01,  # USAGE_PAGE (Generic Desktop)
    0x09, 0x05,  # USAGE (Game Pad)
    0xa1, 0x01,  # COLLECTION (Application)
    0xa1, 0x00,  #   COLLECTION (Physical)
    0x05, 0x09,  #     USAGE_PAGE (Button)
    0x19, 0x01,  #     USAGE_MINIMUM (Button 1)
    0x29, 0x10,  #     USAGE_MAXIMUM (Button 16)
    0x15, 0x00,  #     LOGICAL_MINIMUM (0)
    0x25, 0x01,  #     LOGICAL_MAXIMUM (1)
    0x95, 0x10,  #     REPORT_COUNT (16)
    0x75, 0x01,  #     REPORT_SIZE (1)
    0x81, 0x02,  #     INPUT (Data, Var, Abs) - 16 button bits
    0x05, 0x01,  #     USAGE_PAGE (Generic Desktop)
    0x09, 0x30,  #     USAGE (X)
    0x09, 0x31,  #     USAGE (Y)
    0x09, 0x32,  #     USAGE (Z)
    0x09, 0x33,  #     USAGE (Rx)
    0x15, 0x81,  #     LOGICAL_MINIMUM (-127)
    0x25, 0x7f,  #     LOGICAL_MAXIMUM (127)
    0x75, 0x08,  #     REPORT_SIZE (8)
    0x95, 0x04,  #     REPORT_COUNT (4)
    0x81, 0x02,  #     INPUT (Data, Var, Abs) - 4 axis bytes
    0xc0,        #   END_COLLECTION
    0xc0)        # END_COLLECTION
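# Remove any virtual device left over from a previous run; the bare except ignores
# the error foohid raises when no such device exists yet.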
try:
foohid.destroy("FooHID simple joypad")
except:
pass
foohid.create("FooHID simple joypad", struct.pack('{0}B'.format(len(joypad)), *joypad), "SN 123", 2, 3)
try:
while True:
x = random.randrange(0,255)
y = random.randrange(0,255)
z = random.randrange(0,255)
rx = random.randrange(0,255)
foohid.send("FooHID simple joypad", struct.pack('H4B', 0, x, y, z, rx))
time.sleep(1)
except KeyboardInterrupt:
foohid.destroy("FooHID simple joypad") | unbit/foohid-py | test_joypad.py | Python | mit | 945 |
"""
Test compiling and executing using the dmd tool.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
from Common.singleStringCannotBeMultipleOptions import testForTool
testForTool('dmd')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| timj/scons | test/D/HSTeoh/sconstest-singleStringCannotBeMultipleOptions_dmd.py | Python | mit | 1,396 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../hdate'))
# -- Project information -----------------------------------------------------
project = 'libhdate'
copyright = '2020, Royi Reshef'
author = 'Royi Reshef'
# The full version, including alpha/beta/rc tags
release = '0.9.12'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| tsvi/py-libhdate | docs/source/conf.py | Python | gpl-3.0 | 1,977 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that the TensorFlow parts of the LSTM example run."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.timeseries.examples import lstm
from tensorflow.python.estimator import estimator_lib
from tensorflow.python.platform import test
class _SeedRunConfig(estimator_lib.RunConfig):
@property
def tf_random_seed(self):
return 3
class LSTMExampleTest(test.TestCase):
def test_periodicity_learned(self):
(observed_times, observed_values,
all_times, predicted_values) = lstm.train_and_predict(
training_steps=2, estimator_config=_SeedRunConfig(),
export_directory=self.get_temp_dir())
self.assertAllEqual([100], observed_times.shape)
self.assertAllEqual([100, 5], observed_values.shape)
self.assertAllEqual([200], all_times.shape)
self.assertAllEqual([200, 5], predicted_values.shape)
# TODO(allenl): Make the model deterministic so you can check something
# substantive.
if __name__ == "__main__":
test.main()
| ghchinoy/tensorflow | tensorflow/contrib/timeseries/examples/lstm_test.py | Python | apache-2.0 | 1,760 |
from django.db import models
from django.db import connection, transaction
from .PUC import PUC
from .common_info import CommonInfo
from .product import Product
from django.utils.translation import ugettext_lazy as _
DEFAULT_CLASSIFICATION_METHOD_CODE = "MA"
class ProductToPUC(CommonInfo):
"""
Each product can be assigned to multiple PUCs, using different classification
methods. The user-facing features in Factotum abstract away the multiple PUCs
in favor of the "uberpuc," or the PUC that was assigned with the most-reliable
classification method.
"""
product = models.ForeignKey(Product, on_delete=models.CASCADE)
puc = models.ForeignKey(PUC, on_delete=models.CASCADE)
puc_assigned_usr = models.ForeignKey(
"auth.User", on_delete=models.SET_NULL, null=True, blank=True
)
puc_assigned_script = models.ForeignKey(
"Script", on_delete=models.SET_NULL, null=True, blank=True
)
classification_method = models.ForeignKey(
"ProductToPucClassificationMethod",
max_length=3,
on_delete=models.PROTECT,
null=False,
blank=False,
default=DEFAULT_CLASSIFICATION_METHOD_CODE,
)
classification_confidence = models.DecimalField(
max_digits=6, decimal_places=3, default=1, null=True, blank=True
)
is_uber_puc = models.BooleanField(default=False, db_index=True)
def __str__(self):
return f"{self.product} --> {self.puc}"
@transaction.atomic()
def update_uber_puc(self):
"""
Run the UPDATE query on all the dashboard_producttopuc records
that share this one's product_id.
"""
uberpuc_update_sql = """
UPDATE
dashboard_producttopuc ptp
LEFT JOIN
dashboard_producttopucclassificationmethod cm ON cm.id = ptp.classification_method_id
LEFT JOIN (
SELECT
ptp.product_id AS product_id,
ptp.puc_id AS puc_id,
ptp.classification_method_id AS classification_method_id,
cm.rank AS rank
FROM
dashboard_producttopuc ptp
LEFT JOIN dashboard_producttopucclassificationmethod cm ON cm.id = ptp.classification_method_id
) ptp_rank ON ptp.product_id = ptp_rank.product_id AND cm.rank > ptp_rank.rank
SET is_uber_puc = ptp_rank.rank IS NULL
"""
with connection.cursor() as cursor:
cursor.execute(
uberpuc_update_sql + f" WHERE ptp.product_id = {self.product_id}"
)
class Meta:
unique_together = ("product", "puc", "classification_method")
def shorthand(self):
return f"{self.product_id} -> {self.puc_id} || {self.classification_method_id} {'*' if self.is_uber_puc else ''}"
class ProductToPucClassificationMethodManager(models.Manager):
def get_by_natural_key(self, code):
return self.get(code=code)
class ProductToPucClassificationMethod(CommonInfo):
"""
PUCs can be assigned to products using a variety of methods. This
model stores the classification methods and ranks them by their
trustworthiness. A product may not be assigned to more than one
PUC with the same classification method.
The related PUC with the highest-ranked classification method
becomes the product's "uberpuc."
"""
code = models.CharField(
max_length=3,
primary_key=True,
verbose_name="classification method code",
db_column="id",
)
name = models.CharField(
max_length=100, unique=True, verbose_name="classification method name"
)
rank = models.PositiveSmallIntegerField(
unique=True, verbose_name="classification method rank"
)
description = models.TextField(blank=True)
def natural_key(self):
return (self.code,)
def get_by_natural_key(self, code):
return self.get(code=code)
objects = ProductToPucClassificationMethodManager()
class Meta:
ordering = ["rank"]
verbose_name = _("PUC classification method")
verbose_name_plural = _("PUC classification methods")
def __str__(self):
return self.code
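# Illustrative sketch (codes, ranks and variable names are hypothetical, not from this
# module): a smaller classification-method rank wins when update_uber_puc() picks the
# uberpuc for a product.
#
#     manual = ProductToPucClassificationMethod(code="MA", name="Manual", rank=1)
#     auto = ProductToPucClassificationMethod(code="AU", name="Automatic", rank=5)
#     ProductToPUC(product=p, puc=puc_a, classification_method=manual).save()
#     ProductToPUC(product=p, puc=puc_b, classification_method=auto).save()
#     ProductToPUC.objects.get(product=p, puc=puc_a).update_uber_puc()
#     # Only the manually assigned PUC ends up with is_uber_puc == True.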
| HumanExposure/factotum | dashboard/models/product_to_puc.py | Python | gpl-3.0 | 4,278 |
from django.conf import settings as config
def settings(request):
""" Give access to some of the application settings"""
return {
'DOMAIN': config.DOMAIN,
'APPLICATION_TITLE': config.APPLICATION_TITLE,
'COMPANY_NAME': config.COMPANY_NAME,
}
def next(request):
"""Make {{ NEXT }} available"""
if 'next' in request.GET:
return { 'NEXT': request.GET['next'] }
elif 'next' in request.POST:
return { 'NEXT': request.POST['next'] }
else:
return { 'NEXT': request.path }
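# Illustrative wiring (dotted paths are hypothetical): both processors are enabled by
# listing them in Django's context-processor setting (TEMPLATE_CONTEXT_PROCESSORS on
# older versions, TEMPLATES[0]['OPTIONS']['context_processors'] later), e.g.
#     'context_processors.settings',
#     'context_processors.next',
# after which templates can use {{ APPLICATION_TITLE }}, {{ DOMAIN }} and {{ NEXT }}.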
| MitMaro/The-Blame-Game | context_processors.py | Python | mit | 485 |
'''
https://leetcode.com/problems/number-of-submatrices-that-sum-to-target/
Algorithm (n rows, m columns):
1. Replace every row by its running (prefix) sums, so mat[i][j] holds the sum of
   row i from column 0 through column j - O(n * m) steps.
2. For every pair of columns (left, right), walk down the rows keeping a running
   sum of that column slice plus a hash map counting previously seen sums; every
   time (running_sum - target) has been seen before, that many submatrices ending
   at the current row sum to target - O(m * m * n) steps overall.
'''
from typing import List
class Solution:
def numSubmatrixSumTarget(self, matrix: List[List[int]], target: int) -> int:
mat = [r[::] for r in matrix]
n, m = len(matrix), len(matrix[0])
for i in range(n):
for j in range(1, m):
mat[i][j] += mat[i][j - 1]
c = 0
for i in range(m):
for j in range(i, m):
sums = {0:1}
s = 0
for k in range(n):
s += mat[k][j] - (mat[k][i - 1] if i > 0 else 0)
diff = s - target
if diff in sums:
c += sums[diff]
if s not in sums: sums[s] = 1
else: sums[s] += 1
return c
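# Minimal check (illustrative; this matrix/target pair is the standard example for
# the problem, where exactly four single-cell submatrices sum to 0):
if __name__ == '__main__':
    print(Solution().numSubmatrixSumTarget([[0, 1, 0], [1, 1, 1], [0, 1, 0]], 0))  # expected: 4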
| jan25/code_sorted | leetcode/target_sum_submatrices.py | Python | unlicense | 1,006 |
# -*- coding: utf-8 -*-
##############################################################################################
# This file is deprecated because Python 2.x is deprecated #
# A Python 3.x version of this file can be found at: #
# #
# https://github.com/Guymer/PyGuymer3/blob/master/MPLS/load_PlayListMark.py #
##############################################################################################
def load_PlayListMark(fobj):
# NOTE: see https://github.com/lerks/BluRay/wiki/PlayListMark
# Import modules ...
import struct
# Initialize variables ...
ans = {}
length3 = 0 # [B]
# Read the binary data ...
ans[u"Length"], = struct.unpack(u">I", fobj.read(4))
ans[u"NumberOfPlayListMarks"], = struct.unpack(u">H", fobj.read(2)); length3 += 2
ans[u"PlayListMarks"] = []
for i in xrange(ans[u"NumberOfPlayListMarks"]):
tmp = {}
fobj.read(1); length3 += 1
tmp[u"MarkType"], = struct.unpack(u">B", fobj.read(1)); length3 += 1
tmp[u"RefToPlayItemID"], = struct.unpack(u">H", fobj.read(2)); length3 += 2
tmp[u"MarkTimeStamp"], = struct.unpack(u">I", fobj.read(4)); length3 += 4
tmp[u"EntryESPID"], = struct.unpack(u">H", fobj.read(2)); length3 += 2
tmp[u"Duration"], = struct.unpack(u">I", fobj.read(4)); length3 += 4
ans[u"PlayListMarks"].append(tmp)
# Pad out the read ...
if length3 != ans[u"Length"]:
l = ans[u"Length"] - length3 # [B]
fobj.read(l); length3 += l
# Return answer ...
return ans, length3
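# Illustrative usage (file name and offset variable are hypothetical): the function
# expects a binary file object already positioned at the start of the PlayListMark
# block of a Blu-ray ".mpls" playlist, e.g.
#     with open(u"00000.mpls", "rb") as fobj:
#         fobj.seek(playlist_mark_start_address)
#         marks, length = load_PlayListMark(fobj)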
| Guymer/PyGuymer | MPLS/load_PlayListMark.py | Python | apache-2.0 | 2,438 |
from dtest import Tester
import os, sys, time
from ccmlib.cluster import Cluster
from tools import require, since
from jmxutils import make_mbean, JolokiaAgent
class TestDeletion(Tester):
def gc_test(self):
""" Test that tombstone are fully purge after gc_grace """
cluster = self.cluster
cluster.populate(1).start()
[node1] = cluster.nodelist()
time.sleep(.5)
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
self.create_cf(cursor, 'cf', gc_grace=0, key_type='int', columns={'c1': 'int'})
cursor.execute('insert into cf (key, c1) values (1,1)')
cursor.execute('insert into cf (key, c1) values (2,1)')
node1.flush()
result = cursor.execute('select * from cf;')
assert len(result) == 2 and len(result[0]) == 2 and len(result[1]) == 2, result
cursor.execute('delete from cf where key=1')
result = cursor.execute('select * from cf;')
if cluster.version() < '1.2': # > 1.2 doesn't show tombstones
assert len(result) == 2 and len(result[0]) == 1 and len(result[1]) == 1, result
node1.flush()
time.sleep(.5)
node1.compact()
time.sleep(.5)
result = cursor.execute('select * from cf;')
assert len(result) == 1 and len(result[0]) == 2, result
@require(9194)
def tombstone_size_test(self):
self.cluster.populate(1).start(wait_for_binary_proto=True)
[node1] = self.cluster.nodelist()
cursor = self.patient_cql_connection(node1)
self.create_ks(cursor, 'ks', 1)
cursor.execute('CREATE TABLE test (i int PRIMARY KEY)')
stmt = cursor.prepare('DELETE FROM test where i = ?')
for i in range(100):
cursor.execute(stmt, [i])
self.assertEqual(memtable_count(node1, 'ks', 'test'), 100)
self.assertGreater(memtable_size(node1, 'ks', 'test'), 0)
def memtable_size(node, keyspace, table):
new_name = node.get_cassandra_version() >= '2.1'
name = 'MemtableLiveDataSize' if new_name else 'MemtableDataSize'
return columnfamily_metric(node, keyspace, table, name)
def memtable_count(node, keyspace, table):
return columnfamily_metric(node, keyspace, table, 'MemtableColumnsCount')
def columnfamily_metric(node, keyspace, table, name):
with JolokiaAgent(node) as jmx:
mbean = make_mbean('metrics', type='ColumnFamily',
name=name, keyspace=keyspace, scope=table)
value = jmx.read_attribute(mbean, 'Value')
return value
| tjake/cassandra-dtest | deletion_test.py | Python | apache-2.0 | 2,588 |
# -*- coding: utf-8 -*-
#--------------------------------------------------------------------#
# This file is part of Py-notify. #
# #
# Copyright (C) 2006, 2007, 2008 Paul Pogonyshev. #
# #
# This library is free software; you can redistribute it and/or #
# modify it under the terms of the GNU Lesser General Public License #
# as published by the Free Software Foundation; either version 2.1 #
# of the License, or (at your option) any later version. #
# #
# This library is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# Lesser General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with this library; if not, write to the Free #
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, #
# Boston, MA 02110-1301 USA #
#--------------------------------------------------------------------#
"""
A collection of utilities that can also be used from outside, if wanted. Functions and
classes here can be assumed public and won’t disappear in future Py-notify versions.
@var is_callable:
Determine if C{object} is callable. E.g. if it is a function, method, class, instance of
a class with C{__call__}, etc. This is the same as built-in function C{callable} does.
C{is_callable} is provided since C{callable} is going to disappear in Python 3000 and may
issue warnings in 2.6.
@var as_string:
Convert any attribute to its name as string. Main use of this utility object is to
perform Python ‘private’ identifier mangling. E.g. you can write::
class MyClass (object):
__slots__ = ('__x')
def get_x (self):
if hasattr (self, as_string.__x):
return self.__x
Advantage is that you don’t have to do mangling ‘by hands’ and hence there is less chance
for a typing error. Furthermore, this code does not require changes if you change
C{MyClass} name to anything else, whereas custom mangling does.
However, usefulness of ‘as_string’ is still doubtful. When I wrote it, I didn’t know one
could just write ``__slots__ = ('__x')``, I thought it needed to be
``__slots__ = ('_MyClass__x')``. Imagine...
"""
__docformat__ = 'epytext en'
__all__ = ('is_callable', 'is_valid_identifier', 'mangle_identifier',
'as_string',
'raise_not_implemented_exception',
'execute',
'frozendict', 'DummyReference', 'ClassTypes', 'StringType')
import re
import sys
import types
from keyword import iskeyword
# 'reduce' is a builtin only on Python 2; importing it from functools keeps the
# helper below working when this module is used under Python 3.
from functools import reduce
if sys.version_info[:3] < (2, 6, 0):
is_callable = callable
else:
def is_callable (object):
return hasattr (object, '__call__')
def is_valid_identifier (identifier):
"""
Determine if C{identifier} is a valid Python identifier. This function never raises
any exceptions. If C{identifier} is not a string, it simply returns C{False}.
    @param identifier: identifier to determine if it is valid.
@type identifier: C{basestring}
@rtype: C{bool}
"""
return (isinstance (identifier, StringType)
and re.match ('^[_a-zA-Z][_a-zA-Z0-9]*$', identifier) is not None
and not iskeyword (identifier))
def mangle_identifier (class_name, identifier):
"""
    Mangle C{identifier} as would be done if it appeared in a class with
    C{class_name}. This function allows mimicking standard Python mangling of
    pseudo-private attributes, i.e. those whose names start with two underscores and don’t
end in two. If C{identifier} is not considered a private name, it is returned
unchanged.
@param class_name: name of Python class.
@type class_name: C{basestring}
@param identifier: name of an attribute of that class.
@type identifier: C{basestring}
@rtype: C{str}
@raises ValueError: if either C{class_name} or C{identifier} is not valid from
Python’s point of view.
"""
if not (is_valid_identifier (class_name) and is_valid_identifier (identifier)):
raise ValueError ("'class_name' and 'identifier' must be valid Python identifiers")
if (identifier.startswith ('__')
and not identifier.endswith ('__')
and class_name != '_' * len (class_name)):
return '_%s%s' % (class_name.lstrip ('_'), identifier)
else:
return identifier
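# Illustrative examples (not part of the library):
#     mangle_identifier ('MyClass', '__x') -> '_MyClass__x' (private name, mangled)
#     mangle_identifier ('MyClass', '__x__') -> '__x__' (dunder name, left alone)
#     mangle_identifier ('MyClass', 'x') -> 'x' (public name, left alone)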
class _AsString (object):
"""
Internal helper class for C{L{as_string}}. Don’t use directly.
"""
__slots__ = ()
def __getattribute__(self, name):
return name
def __setattr__(self, name, value):
raise TypeError ("'as_string' attributes cannot be set")
def __delattr__(self, name):
raise TypeError ("'as_string' attributes cannot be deleted")
def __repr__(self):
return 'notify.utils.as_string'
as_string = _AsString ()
def raise_not_implemented_exception (object = None, function_name = None):
"""
Raise C{NotImplementedError} for a method invoked with C{object} as C{self}. The
function determines object class and method declaration class(es) itself and that’s
the whole point of it.
It should be called like this:
>>> raise_not_implemented_exception (self)
And output might look like this::
File ".../foo.py", line # in ?
Foo ().bar ()
File ".../foo.py", line #, in bar
raise_not_implemented_exception (self)
File ".../notify/utils.py", line #, in raise_not_implemented_exception
raise exception
NotImplementedError: bar() not implemented in class Foo (declared in AbstractFoo)
    Optionally, C{function_name} can be specified. This argument mainly exists for C
    extensions, since the function name cannot be detected automatically in this case. In
Python code you should just leave this argument out.
@param object: the object for which a non-implemented method is called.
@type object: C{object}
@param function_name: name of the unimplemented function or method (inferred
automatically for non-extension functions).
@type function_name: C{basestring} or C{None}
@raises NotImplementedError: always.
"""
if function_name is None:
try:
raise Exception
except Exception:
try:
traceback = sys.exc_info () [2]
function_name = traceback.tb_frame.f_back.f_code.co_name
except Exception:
# We can do nothing, ignore.
pass
if function_name is not None:
function_description = '%s()' % function_name
else:
function_description = 'UNKNOWN FUNCTION'
try:
class_description = ' in class %s' % object.__class__.__name__
if function_name is not None:
declaration_classes = _find_declaration_classes (object.__class__, function_name)
if len (declaration_classes) == 1:
if declaration_classes[0] is not object.__class__:
class_description += ' (declared in %s)' % declaration_classes[0].__name__
elif len (declaration_classes) > 1:
class_description += (' (declared in %s)'
% ', '.join ([_class.__name__
for _class in declaration_classes]))
except Exception:
class_description = ''
exception = NotImplementedError ('%s not implemented%s'
% (function_description, class_description))
raise exception
def _find_declaration_classes (_class, function_name):
declaring_bases = [base for base in _class.__bases__ if hasattr (base, function_name)]
if declaring_bases:
return reduce (lambda list1, list2: list1 + list2,
[_find_declaration_classes (base, function_name)
for base in declaring_bases],
[])
else:
return [_class]
if sys.version_info[0] >= 3:
execute = eval ('exec')
else:
from notify._2_x import execute
class frozendict (dict):
__slots__ = ('__hash')
def __init__(self, *arguments, **keywords):
super (frozendict, self).__init__(*arguments, **keywords)
self.__hash = None
def clear (self):
raise TypeError ("'%s' object doesn't support clearing" % type (self).__name__)
def pop (self, key, default = None):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def popitem (self):
raise TypeError ("'%s' object doesn't support popping" % type (self).__name__)
def setdefault (self, key, default = None):
raise TypeError ("'%s' object doesn't support setdefault operation" % type (self).__name__)
def update (self, dict):
raise TypeError ("'%s' object doesn't support updating" % type (self).__name__)
def __setitem__(self, key, value):
raise TypeError ("'%s' object doesn't support item setting" % type (self).__name__)
def __delitem__(self, key):
raise TypeError ("'%s' object doesn't support item deletion" % type (self).__name__)
def __hash__(self):
_hash = self.__hash
if _hash is None:
_hash = 0x1337
if hasattr (dict, 'iteritems'):
for key, value in self.iteritems ():
_hash ^= hash (key) ^ hash (value)
else:
for key, value in self.items ():
_hash ^= hash (key) ^ hash (value)
self.__hash = _hash
return _hash
def __repr__(self):
return '%s (%s)' % (type (self).__name__, super (frozendict, self).__repr__())
frozendict.EMPTY = frozendict ({ })
# Force hash to be precomputed.
hash (frozendict.EMPTY)
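# Illustrative sketch (not part of the library): because frozendict rejects mutation
# and implements __hash__, instances can be used where ordinary dicts cannot, for
# example as dictionary keys:
#
#     key = frozendict ({'host': 'localhost', 'port': 8080})
#     connections = { key: 'some connection object' }
#     key['port'] = 9090 # raises TypeError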
class DummyReference (object):
"""
Simple class that is interface-compatible with C{weakref.ReferenceType}. In other
words, its constructor accepts only one parameter and this value is later returned
from C{L{__call__}} method. Unlike weak references, instances of this class don’t do
anything special. They are only needed to avoid special cases for non-references,
since you can treat instances of C{weakref.ReferenceType} and this class in the same
way.
"""
__slots__ = ('__object')
def __init__(self, object):
"""
Create a new dummy reference that will return C{object} when called.
@param object: the object that will be returned by this reference.
@type object: C{object}
"""
self.__object = object
def __call__(self):
"""
Return the C{object} specified at construction time.
@rtype: C{object}
"""
return self.__object
def __repr__(self):
return ('<%s.%s at 0x%x; to %r>'
% (self.__module__, self.__class__.__name__, id (self), self.__object))
def __str__(self):
return '<%s at 0x%x; to %s>' % (self.__class__.__name__, id (self), self.__object)
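# Illustrative sketch (not part of the library): code that may hold either a weak or a
# dummy (strong) reference can dereference both the same way:
#
#     import weakref
#     reference = weakref.ref (obj) if use_weak_reference else DummyReference (obj)
#     ...
#     target = reference () # call syntax works for both kinds of reference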
if sys.version_info[0] >= 3:
ClassTypes = (type,)
StringType = str
else:
ClassTypes = (type, types.ClassType)
StringType = basestring
# Local variables:
# mode: python
# python-indent: 4
# indent-tabs-mode: nil
# fill-column: 90
# End:
| berinhard/py-notify | notify/utils.py | Python | lgpl-2.1 | 11,952 |
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
import json
import ngraph as ng
from ngraph.impl import Function
from ngraph.exceptions import UserInputError
import test
from test.ngraph.util import get_runtime, run_op_node
def test_ngraph_function_api():
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=np.float32, name="A")
parameter_b = ng.parameter(shape, dtype=np.float32, name="B")
parameter_c = ng.parameter(shape, dtype=np.float32, name="C")
model = (parameter_a + parameter_b) * parameter_c
function = Function(model, [parameter_a, parameter_b, parameter_c], "TestFunction")
ordered_ops = function.get_ordered_ops()
op_types = [op.get_type_name() for op in ordered_ops]
assert op_types == ["Parameter", "Parameter", "Parameter", "Add", "Multiply", "Result"]
assert len(function.get_ops()) == 6
assert function.get_output_size() == 1
assert function.get_output_op(0).get_type_name() == "Result"
assert function.get_output_element_type(0) == parameter_a.get_output_element_type(0)
assert list(function.get_output_shape(0)) == [2, 2]
assert len(function.get_parameters()) == 3
assert len(function.get_results()) == 1
assert function.get_name() == "TestFunction"
@pytest.mark.parametrize(
"dtype",
[
np.float32,
np.float64,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
],
)
def test_simple_computation_on_ndarrays(dtype):
runtime = get_runtime()
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
parameter_c = ng.parameter(shape, dtype=dtype, name="C")
model = (parameter_a + parameter_b) * parameter_c
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
value_a = np.array([[1, 2], [3, 4]], dtype=dtype)
value_b = np.array([[5, 6], [7, 8]], dtype=dtype)
value_c = np.array([[9, 10], [11, 12]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[54, 80], [110, 144]], dtype=dtype))
value_a = np.array([[13, 14], [15, 16]], dtype=dtype)
value_b = np.array([[17, 18], [19, 20]], dtype=dtype)
value_c = np.array([[21, 22], [23, 24]], dtype=dtype)
result = computation(value_a, value_b, value_c)
assert np.allclose(result, np.array([[630, 704], [782, 864]], dtype=dtype))
def test_serialization():
dtype = np.float32
backend_name = test.BACKEND_NAME
shape = [2, 2]
parameter_a = ng.parameter(shape, dtype=dtype, name="A")
parameter_b = ng.parameter(shape, dtype=dtype, name="B")
parameter_c = ng.parameter(shape, dtype=dtype, name="C")
model = (parameter_a + parameter_b) * parameter_c
runtime = ng.runtime(backend_name=backend_name)
computation = runtime.computation(model, parameter_a, parameter_b, parameter_c)
try:
serialized = computation.serialize(2)
serial_json = json.loads(serialized)
assert serial_json[0]["name"] != ""
assert 10 == len(serial_json[0]["ops"])
except Exception:
pass
def test_broadcast_1():
input_data = np.array([1, 2, 3])
new_shape = [3, 3]
expected = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
def test_broadcast_2():
input_data = np.arange(4)
new_shape = [3, 4, 2, 4]
expected = np.broadcast_to(input_data, new_shape)
result = run_op_node([input_data], ng.broadcast, new_shape)
assert np.allclose(result, expected)
def test_broadcast_3():
input_data = np.array([1, 2, 3])
new_shape = [3, 3]
axis_mapping = [0]
expected = [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
result = run_op_node([input_data], ng.broadcast, new_shape, axis_mapping, "EXPLICIT")
assert np.allclose(result, expected)
@pytest.mark.parametrize(
"destination_type, input_data",
[(bool, np.zeros((2, 2), dtype=int)), ("boolean", np.zeros((2, 2), dtype=int))],
)
def test_convert_to_bool(destination_type, input_data):
expected = np.array(input_data, dtype=bool)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == bool
@pytest.mark.parametrize(
"destination_type, rand_range, in_dtype, expected_type",
[
(np.float32, (-8, 8), np.int32, np.float32),
(np.float64, (-16383, 16383), np.int64, np.float64),
("f32", (-8, 8), np.int32, np.float32),
("f64", (-16383, 16383), np.int64, np.float64),
],
)
def test_convert_to_float(destination_type, rand_range, in_dtype, expected_type):
np.random.seed(133391)
input_data = np.random.randint(*rand_range, size=(2, 2), dtype=in_dtype)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
@pytest.mark.parametrize(
"destination_type, expected_type",
[
(np.int8, np.int8),
(np.int16, np.int16),
(np.int32, np.int32),
(np.int64, np.int64),
("i8", np.int8),
("i16", np.int16),
("i32", np.int32),
("i64", np.int64),
],
)
def test_convert_to_int(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(-8 + np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
@pytest.mark.parametrize(
"destination_type, expected_type",
[
(np.uint8, np.uint8),
(np.uint16, np.uint16),
(np.uint32, np.uint32),
(np.uint64, np.uint64),
("u8", np.uint8),
("u16", np.uint16),
("u32", np.uint32),
("u64", np.uint64),
],
)
def test_convert_to_uint(destination_type, expected_type):
np.random.seed(133391)
input_data = np.ceil(np.random.rand(2, 3, 4) * 16)
expected = np.array(input_data, dtype=expected_type)
result = run_op_node([input_data], ng.convert, destination_type)
assert np.allclose(result, expected)
assert np.array(result).dtype == expected_type
def test_bad_data_shape():
A = ng.parameter(shape=[2, 2], name="A", dtype=np.float32)
B = ng.parameter(shape=[2, 2], name="B")
model = A + B
runtime = ng.runtime(backend_name="INTERPRETER")
computation = runtime.computation(model, A, B)
value_a = np.array([[1, 2]], dtype=np.float32)
value_b = np.array([[5, 6], [7, 8]], dtype=np.float32)
with pytest.raises(UserInputError):
computation(value_a, value_b)
def test_constant_get_data_bool():
input_data = np.array([True, False, False, True])
node = ng.constant(input_data, dtype=np.bool)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.float32, np.float64])
def test_constant_get_data_floating_point(data_type):
np.random.seed(133391)
input_data = np.random.randn(2, 3, 4).astype(data_type)
min_value = -1.0e20
max_value = 1.0e20
input_data = min_value + input_data * max_value * data_type(2)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.int64, np.int32, np.int16, np.int8])
def test_constant_get_data_signed_integer(data_type):
np.random.seed(133391)
input_data = np.random.randint(np.iinfo(data_type).min, np.iinfo(data_type).max,
size=[2, 3, 4], dtype=data_type)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
@pytest.mark.parametrize("data_type", [np.uint64, np.uint32, np.uint16, np.uint8])
def test_constant_get_data_unsigned_integer(data_type):
np.random.seed(133391)
input_data = np.random.randn(2, 3, 4).astype(data_type)
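    # Spread the random values across roughly the full range of the target
    # unsigned type before casting back to that dtype.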
input_data = (
np.iinfo(data_type).min
+ input_data * np.iinfo(data_type).max
+ input_data * np.iinfo(data_type).max
)
node = ng.constant(input_data, dtype=data_type)
retrieved_data = node.get_data()
assert np.allclose(input_data, retrieved_data)
def test_result():
node = [[11, 10], [1, 8], [3, 4]]
    result = run_op_node([node], ng.ops.result)
assert np.allclose(result, node)
| NervanaSystems/ngraph | python/test/ngraph/test_basic.py | Python | apache-2.0 | 9,480 |
import hashlib
import os
import shutil
import stat
import time
from django.conf import settings
from django.core.cache import cache
import commonware.log
import cronjobs
from files.models import FileValidation
log = commonware.log.getLogger('z.cron')
@cronjobs.register
def cleanup_extracted_file():
log.info('Removing extracted files for file viewer.')
root = os.path.join(settings.TMP_PATH, 'file_viewer')
for path in os.listdir(root):
full = os.path.join(root, path)
age = time.time() - os.stat(full)[stat.ST_ATIME]
if (age) > (60 * 60):
log.debug('Removing extracted files: %s, %dsecs old.' %
(full, age))
shutil.rmtree(full)
# Nuke out the file and diff caches when the file gets removed.
id = os.path.basename(path)
try:
int(id)
except ValueError:
continue
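            # Rebuild the memoize cache key used by the file viewer: the numeric id
            # is md5-hashed and combined with the cache prefix before deletion.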
key = hashlib.md5()
key.update(str(id))
cache.delete('%s:memoize:%s:%s' % (settings.CACHE_PREFIX,
'file-viewer', key.hexdigest()))
@cronjobs.register
def cleanup_validation_results():
"""Will remove all validation results. Used when the validator is
upgraded and results may no longer be relevant."""
    # With a large enough number of objects, not using no_cache() results in tracebacks.
all = FileValidation.objects.no_cache().all()
log.info('Removing %s old validation results.' % (all.count()))
all.delete()
| jinankjain/zamboni | apps/files/cron.py | Python | bsd-3-clause | 1,539 |
#ImportModules
import ShareYourSystem as SYS
#Define and set a dict
MySetter=SYS.SetterClass(
).set(
'set',
{
'#liarg':('MyRedirectStr','MyStr')
}
).set(
'set',
{
'#liarg':'MyFirstStr',
'#kwarg':{'SettingValueVariable':'SettingValueVariable'}
}
).set(
'set',
{
'#liarg:#map@get':['MyRedirectStr','MyFirstStr'],
#'#kwarg':{'SettingValueVariable':'salut'}
}
).set(
'set',
{
'#liarg':['MyInt'],
'#kwarg':{'SettingValueVariable':2}
}
).set(
'set',
{
'#liarg':['MyThirdStr'],
'#kwarg:#map@get:#key':{'MyFirstStr':'allo!'}
}
).set(
'set',
{
'#liarg':['MyFourStr'],
'#kwarg:#map@get:#value':{'SettingValueVariable':'MyStr'}
}
).set(
'set',
{
'#liarg':['MyFifthStr'],
'#kwarg:#map@get:#key:value':{'MyFirstStr':'MyFourStr'}
}
)
#print
print('MySetter is ')
SYS._print(MySetter)
| Ledoux/ShareYourSystem | Pythonlogy/build/lib/ShareYourSystem/Standards/Itemizers/Setter/15_ExampleDoc.py | Python | mit | 862 |
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# internal
import stix
import stix.bindings.stix_common as common_binding
# relative
from .vocabs import VocabString
class Names(stix.EntityList):
_namespace = "http://stix.mitre.org/common-1"
_binding = common_binding
_binding_class = _binding.NamesType
_contained_type = VocabString
_binding_var = 'Name'
_inner_name = 'names'
_dict_as_list = True
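# Illustrative usage (not part of the original module; the methods used come from
# the stix.EntityList base class):
#
#     names = Names()
#     names.append(VocabString("Example Name"))
#     names.to_dict()  # serialised as a plain list because _dict_as_list is True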
| chriskiehl/python-stix | stix/common/names.py | Python | bsd-3-clause | 486 |
import unittest
import lxml
import lxml.etree
from pywps.app import Process, Service
from pywps import WPS, OWS
from tests.common import assert_pywps_version, client_for
class BadRequestTest(unittest.TestCase):
def test_bad_http_verb(self):
client = client_for(Service())
resp = client.put('')
assert resp.status_code == 405 # method not allowed
def test_bad_request_type_with_get(self):
client = client_for(Service())
resp = client.get('?Request=foo')
assert resp.status_code == 400
def test_bad_service_type_with_get(self):
client = client_for(Service())
resp = client.get('?service=foo')
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'InvalidParameterValue'
def test_bad_request_type_with_post(self):
client = client_for(Service())
request_doc = WPS.Foo()
resp = client.post_xml('', doc=request_doc)
assert resp.status_code == 400
class CapabilitiesTest(unittest.TestCase):
def setUp(self):
def pr1(): pass
def pr2(): pass
self.client = client_for(Service(processes=[Process(pr1, 'pr1', 'Process 1'), Process(pr2, 'pr2', 'Process 2')]))
def check_capabilities_response(self, resp):
assert resp.status_code == 200
assert resp.headers['Content-Type'] == 'text/xml'
title = resp.xpath_text('/wps:Capabilities'
'/ows:ServiceIdentification'
'/ows:Title')
assert title != ''
names = resp.xpath_text('/wps:Capabilities'
'/wps:ProcessOfferings'
'/wps:Process'
'/ows:Identifier')
assert sorted(names.split()) == ['pr1', 'pr2']
def test_get_request(self):
resp = self.client.get('?Request=GetCapabilities&service=WpS')
self.check_capabilities_response(resp)
        # case-insensitive check
resp = self.client.get('?request=getcapabilities&service=wps')
self.check_capabilities_response(resp)
def test_post_request(self):
request_doc = WPS.GetCapabilities()
resp = self.client.post_xml(doc=request_doc)
self.check_capabilities_response(resp)
def test_get_bad_version(self):
resp = self.client.get('?request=getcapabilities&service=wps&acceptversions=2001-123')
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'VersionNegotiationFailed'
def test_post_bad_version(self):
acceptedVersions_doc = OWS.AcceptVersions(
OWS.Version('2001-123'))
request_doc = WPS.GetCapabilities(acceptedVersions_doc)
resp = self.client.post_xml(doc=request_doc)
exception = resp.xpath('/ows:ExceptionReport'
'/ows:Exception')
assert resp.status_code == 400
assert exception[0].attrib['exceptionCode'] == 'VersionNegotiationFailed'
def test_pywps_version(self):
resp = self.client.get('?service=WPS&request=GetCapabilities')
assert_pywps_version(resp)
def load_tests(loader=None, tests=None, pattern=None):
if not loader:
loader = unittest.TestLoader()
suite_list = [
loader.loadTestsFromTestCase(BadRequestTest),
loader.loadTestsFromTestCase(CapabilitiesTest),
]
return unittest.TestSuite(suite_list)
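# Minimal standalone-runner sketch (an addition for illustration; the module is
# normally collected by the project's own test runner). load_tests() works without
# arguments because of its defaults.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(load_tests())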
| jachym/PyWPS | tests/test_capabilities.py | Python | mit | 3,680 |
# # -*- coding: utf-8 -*-
# from django.contrib.auth import get_user_model
# from django.test import TransactionTestCase
# from net_promoter_score.forms import UserScoreForm
# from net_promoter_score.models import UserScore, score_group
# class UserScoreFormTests(TransactionTestCase):
# """Test suite for promoter score forms."""
# def setUp(self):
# self.user = get_user_model().objects.create_user('zoidberg')
# def validate_form(self, data):
# form = UserScoreForm(data=data, user=self.user)
# return form.is_valid()
# def test_clean_valid_score(self):
# for i in xrange(0, 11):
# self.assertTrue(self.validate_form(data={'score': i}))
# def test_clean_invalid_score(self):
# for i in (-2, 11, "", None):
# self.assertFalse(self.validate_form(data={'score': i}))
# def test_clean_unicode_reason(self):
# data={'score': 0, 'reason': u"√" * 512}
# self.assertTrue(self.validate_form(data=data))
# def test_clean_invalid_reason(self):
# data={'score': 0, 'reason': u"√" * 513}
# self.assertFalse(self.validate_form(data))
# def test_save(self):
# data = {'score': 0, 'reason': u"∂ƒ©˙∆˚¬"}
# form = UserScoreForm(data=data, user=self.user)
# score = form.save()
# self.assertIsNotNone(score)
# self.assertEqual(score.user, self.user)
# self.assertEqual(score.score, 0)
# self.assertEqual(score.reason, data['reason']) | hugorodgerbrown/django-hipchat | hipchat/tests/test_forms.py | Python | mit | 1,525 |
from __future__ import unicode_literals
def execute():
"""Make standard print formats readonly for system manager"""
import webnotes.model.doc
new_perms = [
{
'parent': 'Print Format',
'parentfield': 'permissions',
'parenttype': 'DocType',
'role': 'System Manager',
'permlevel': 1,
'read': 1,
},
{
'parent': 'Print Format',
'parentfield': 'permissions',
'parenttype': 'DocType',
'role': 'Administrator',
'permlevel': 1,
'read': 1,
'write': 1
},
]
for perms in new_perms:
doc = webnotes.model.doc.Document('DocPerm')
doc.fields.update(perms)
doc.save()
webnotes.conn.commit()
webnotes.conn.begin()
webnotes.reload_doc('core', 'doctype', 'print_format') | gangadhar-kadam/mtn-erpnext | patches/may_2012/std_pf_readonly.py | Python | agpl-3.0 | 718 |
"""Tests for the `eofs.tools` package."""
# (c) Copyright 2013-2014 Andrew Dawson. All Rights Reserved.
#
# This file is part of eofs.
#
# eofs is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# eofs is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License
# along with eofs. If not, see <http://www.gnu.org/licenses/>.
from nose import SkipTest
from nose.tools import raises
import numpy as np
try:
from iris.cube import Cube
except ImportError:
pass
import eofs
from eofs.tests import EofsTest
from .reference import reference_solution
from .utils import sign_adjustments
# Create a mapping from interface name to tools module and solver class.
tools = {'standard': eofs.tools.standard}
solvers = {'standard': eofs.standard.Eof}
try:
tools['cdms'] = eofs.tools.cdms
solvers['cdms'] = eofs.cdms.Eof
except AttributeError:
pass
try:
tools['iris'] = eofs.tools.iris
solvers['iris'] = eofs.iris.Eof
except AttributeError:
pass
class ToolsTest(EofsTest):
""""""
interface = None
weights = None
@classmethod
def setup_class(cls):
try:
cls.solution = reference_solution(cls.interface, cls.weights)
except ValueError:
raise SkipTest('library component not available '
'for {!s} interface'.format(cls.interface))
cls.neofs = cls.solution['eigenvalues'].shape[0]
try:
cls.solver = solvers[cls.interface](cls.solution['sst'],
weights=cls.solution['weights'])
cls.tools = {'covariance': tools[cls.interface].covariance_map,
'correlation': tools[cls.interface].correlation_map,}
except KeyError:
raise SkipTest('library component not available '
'for {!s} interface'.format(cls.interface))
def test_covariance_map(self):
# covariance maps should match reference EOFs as covariance
pcs = self.solver.pcs(npcs=self.neofs, pcscaling=1)
cov = self.tools['covariance'](pcs, self.solution['sst'])
eofs = self._tomasked(self.solver.eofs(neofs=self.neofs))
reofs = self._tomasked(self.solution['eofs'])
cov = self._tomasked(cov) * sign_adjustments(eofs, reofs)
self.assert_array_almost_equal(cov, self.solution['eofscov'])
def test_correlation_map(self):
# correlation maps should match reference EOFs as correlation
pcs = self.solver.pcs(npcs=self.neofs, pcscaling=1)
cor = self.tools['correlation'](pcs, self.solution['sst'])
eofs = self._tomasked(self.solver.eofs(neofs=self.neofs))
reofs = self._tomasked(self.solution['eofs'])
cor = self._tomasked(cor) * sign_adjustments(eofs, reofs)
self.assert_array_almost_equal(cor, self.solution['eofscor'])
def test_covariance_map_point(self):
# single point covariance map should match reference EOFs as covariance
# at the same point
pcs = self.solver.pcs(npcs=1, pcscaling=1)[:, 0]
cov = self.tools['covariance'](pcs, self.solution['sst'][:, 5, 5])
eofs = self._tomasked(self.solver.eofs(neofs=self.neofs))
reofs = self._tomasked(self.solution['eofs'])
cov = self._tomasked(cov) * sign_adjustments(eofs, reofs)[0]
self.assert_array_almost_equal(cov, self.solution['eofscov'][0, 5, 5])
def test_correlation_map_point(self):
# single point correlation map should match reference EOFs as
# correlation at the same point
pcs = self.solver.pcs(npcs=1, pcscaling=1)[:, 0]
cor = self.tools['correlation'](pcs, self.solution['sst'][:, 5, 5])
eofs = self._tomasked(self.solver.eofs(neofs=self.neofs))
reofs = self._tomasked(self.solution['eofs'])
cor = self._tomasked(cor) * sign_adjustments(eofs, reofs)[0]
self.assert_array_almost_equal(cor, self.solution['eofscor'][0, 5, 5])
def test_covcor_map_invalid_time_dimension(self):
# generate tests for covariance/correlation maps with invalid time
# dimensions
for maptype in ('covariance', 'correlation'):
yield self.check_covcor_map_invalid_time_dimension, maptype
@raises(ValueError)
def check_covcor_map_invalid_time_dimension(self, maptype):
# compute a map with an invalid time dimension in the input
pcs = self.solver.pcs(npcs=self.neofs, pcscaling=1)[:-1]
covcor = self.tools[maptype](pcs, self.solution['sst'])
def test_covcor_map_invalid_pc_shape(self):
# generate tests for covariance/correlation maps with input PCs with
# invalid shape
for maptype in ('covariance', 'correlation'):
yield self.check_covcor_map_invalid_pc_shape, maptype
@raises(ValueError)
def check_covcor_map_invalid_pc_shape(self, maptype):
# compute a map for PCs with invalid shape
covcor = self.tools[maptype](self.solution['sst'], self.solution['sst'])
#-----------------------------------------------------------------------------
# Tests for the standard interface
class TestToolsStandard(ToolsTest):
"""Test the standard interface tools."""
interface = 'standard'
weights = 'equal'
def _tomasked(self, value):
return value
#-----------------------------------------------------------------------------
# Tests for the cdms interface
class TestToolsCDMS(ToolsTest):
"""Test the cdms interface tools."""
interface = 'cdms'
weights = 'equal'
def _tomasked(self, value):
try:
return value.asma()
except AttributeError:
return value
#-----------------------------------------------------------------------------
# Tests for the iris interface
class TestToolsIris(ToolsTest):
"""Test the iris interface tools."""
interface = 'iris'
weights = 'equal'
def _tomasked(self, value):
if type(value) is not Cube:
return value
return value.data
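# Illustrative sketch of the API exercised above (the shapes and data are made up;
# the real tests use the packaged reference solution instead):
if __name__ == '__main__':
    field = np.random.randn(120, 10, 20)             # (time, lat, lon) anomalies
    solver = solvers['standard'](field)              # eofs.standard.Eof
    pcs = solver.pcs(npcs=2, pcscaling=1)            # leading PCs scaled to unit variance
    cov = tools['standard'].covariance_map(pcs, field)
    print(cov.shape)                                 # -> (2, 10, 20)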
| nicolasfauchereau/eofs | lib/eofs/tests/test_tools.py | Python | gpl-3.0 | 6,440 |
import collections
import json
import ApiHttpURLFetcher
class ApiRequestMaker:
def __init__(self, aso, url, apiKey):
self.aso = aso
self.url = url
self.apiKey = apiKey
self.default_user_agent = "Python Wrapper v1"
self.default_timeout = 5000
self.contentType = "application/json"
def fetchRecommendation(self, req):
request = self.buildSingletonRequest( "fetch_recommendation" , req)
return self.doFetch( request )
def fetchRecommendations(self):
request = self.buildRequest( "fetch_recommendations" )
return self.doFetch( request )
def fetchAllocation(self, req):
request = self.buildSingletonRequest( "fetch_allocation" , req)
return self.doFetch( request )
def fetchAllocations(self):
request = self.buildRequest( "fetch_allocations" )
return self.doFetch( request )
def fetchLastEngagementRecordSubmitted( self):
request = self.buildRequest( "last_engagement_record_submitted" )
return self.doFetch( request )
def postData(self, data):
request = self.buildPostDataRequest( "post_event_data", data)
return self.doFetch( request )
def doFetch(self, request):
self.aso.setRawHTTPRequest( request )
uf = ApiHttpURLFetcher.ApiHttpURLFetcher( self.default_user_agent, self.default_timeout, "POST")
uf.postData( self.url, self.contentType, request )
res = uf.getContent()
self.aso.setRawHTTPResponse( res )
return res
def buildRequest( self, r_type ):
header = collections.OrderedDict()
header['api_key'] = self.apiKey
header['type'] = r_type
return json.dumps( header )
def buildSingletonRequest( self, r_type, criteria ):
header = collections.OrderedDict()
header['api_key'] = self.apiKey
header['type'] = r_type
header['criteria'] = criteria
return json.dumps( header )
def buildPostDataRequest(self, r_type, data):
header = collections.OrderedDict()
header['api_key'] = self.apiKey
header['type'] = r_type
header['data'] = data
return json.dumps( header )
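# Illustrative usage sketch (not part of the original wrapper). The endpoint URL and
# API key are placeholders, and the stub below only mimics the collaborator the
# wrapper uses to record raw request/response payloads.
if __name__ == "__main__":
    class _StubStateObject:
        def setRawHTTPRequest(self, request):
            self.raw_request = request
        def setRawHTTPResponse(self, response):
            self.raw_response = response
    maker = ApiRequestMaker(_StubStateObject(), "https://api.example.com/endpoint", "YOUR_API_KEY")
    # buildRequest only assembles the JSON payload; no network call is made here.
    print(maker.buildRequest("fetch_recommendations"))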
| PerceptLink/perceptlink-python-api-wrapper | src/ApiRequestMaker.py | Python | mit | 2,015 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_leaf_profile
short_description: Manage fabric interface policy leaf profiles (infra:AccPortP)
description:
- Manage fabric interface policy leaf profiles on Cisco ACI fabrics.
version_added: '2.5'
options:
leaf_interface_profile:
description:
- The name of the Fabric access policy leaf interface profile.
type: str
required: yes
aliases: [ name, leaf_interface_profile_name ]
description:
description:
- Description for the Fabric access policy leaf interface profile.
type: str
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
name_alias:
version_added: '2.10'
description:
- The alias for the current object. This relates to the nameAlias field in ACI.
type: str
extends_documentation_fragment: aci
seealso:
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(infra:AccPortP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Bruno Calogero (@brunocalogero)
'''
EXAMPLES = r'''
- name: Add a new leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
description: leafintprfname description
state: present
delegate_to: localhost
- name: Remove a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: absent
delegate_to: localhost
- name: Remove all leaf_interface_profiles
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
state: absent
delegate_to: localhost
- name: Query a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
leaf_interface_profile=dict(type='str', aliases=['name', 'leaf_interface_profile_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
name_alias=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['leaf_interface_profile']],
['state', 'present', ['leaf_interface_profile']],
],
)
leaf_interface_profile = module.params.get('leaf_interface_profile')
description = module.params.get('description')
state = module.params.get('state')
name_alias = module.params.get('name_alias')
aci = ACIModule(module)
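    # Standard ACI module flow: build the request URL for the infraAccPortP object,
    # read any existing configuration, then diff-and-post or delete depending on state.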
aci.construct_url(
root_class=dict(
aci_class='infraAccPortP',
aci_rn='infra/accportprof-{0}'.format(leaf_interface_profile),
module_object=leaf_interface_profile,
target_filter={'name': leaf_interface_profile},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='infraAccPortP',
class_config=dict(
name=leaf_interface_profile,
descr=description,
nameAlias=name_alias,
),
)
aci.get_diff(aci_class='infraAccPortP')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| shsingh/ansible | lib/ansible/modules/network/aci/aci_interface_policy_leaf_profile.py | Python | gpl-3.0 | 7,180 |
'''
Created on Dec 17, 2013
@author: [email protected]
This file is part of XOZE.
XOZE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
XOZE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with XOZE. If not, see <http://www.gnu.org/licenses/>.
'''
from xoze.context import AddonContext, SnapVideo
from xoze.snapvideo import Dailymotion, Playwire, YouTube, Tune_pk, VideoWeed, \
Nowvideo, Novamov, CloudEC, VideoHut, VideoTanker, LetWatch, VideoSky
from xoze.utils import file, http, jsonfile
from xoze.utils.cache import CacheManager
from xoze.utils.http import HttpClient
import BeautifulSoup
import base64
import logging
import pickle
import re
import time
import urllib
import xbmc # @UnresolvedImport
import xbmcgui # @UnresolvedImport
DIRECT_CHANNELS = {"Awards & Concerts":{"iconimage":"Awards.jpg",
"channelType": "IND",
"tvshow_episodes_url": "/forums/36-Awards-Performances-Concerts"},
"Latest & Exclusive Movies":{"iconimage":"Movies.jpeg",
"channelType": "IND",
"tvshow_episodes_url": "/forums/20-Latest-Exclusive-Movie-HQ"}}
LIVE_CHANNELS = {"9XM":{"iconimage":"http://www.lyngsat-logo.com/logo/tv/num/9x_music.png",
"channelType": "IND",
"channelUrl": "http://edge.purplestream.com/live/9xm/amlst:9xmus/playlist.m3u8"},
"MTunes":{"iconimage":"http://www.lyngsat-logo.com/logo/tv/mm/m_tunes_hd.png",
"channelType": "IND",
"channelUrl": "http://mthls-i.akamaihd.net/hls/live/219508/mthls/playlist.m3u8"},
"9X Jalwa":{"iconimage":"http://www.lyngsat-logo.com/logo/tv/num/9x_jalwa.png",
"channelType": "IND",
"channelUrl": "http://edge.purplestream.com/live/9xjal/amlst:9xjl/playlist.m3u8"},
"9x Tashan":{"iconimage":"http://www.lyngsat-logo.com/logo/tv/num/9x_tashan.png",
"channelType": "IND",
"channelUrl": "http://edge.purplestream.com/live/9xtas/amlst:9xtsh/playlist.m3u8"},
"9x Jhakaas":{"iconimage":"http://www.lyngsat-logo.com/logo/tv/num/9x_jhakaas.png",
"channelType": "IND",
"channelUrl": "http://edge.purplestream.com/live/9xjha/amlst:9xjak/playlist.m3u8"},
"IBN7": {"iconimage":"http://www.lyngsat-logo.com/logo/tv/ii/ibn7.png",
"channelType": "IND",
"channelUrl": "http://ibn7_hls-lh.akamaihd.net/i/ibn7_hls_n_1@174951/index_3_av-b.m3u8?sd=10&play-only=backup&rebase=on"},
"India TV": {"iconimage":"http://www.lyngsat-logo.com/logo/tv/ii/india_tv_in.png",
"channelType": "IND",
"channelUrl": "http://indiatvnews-lh.akamaihd.net/i/ITV_1@199237/master.m3u8"},
"Aajtak": {"iconimage":"http://www.lyngsat-logo.com/logo/tv/aa/aaj_tak.png",
"channelType": "IND",
"channelUrl": "plugin://plugin.video.youtube/?action=play_video&videoid=CNfJ3VzzC20"},
"Tez TV": {"iconimage":"http://www.lyngsat-logo.com/logo/tv/tt/tez_tv_in.png",
"channelType": "IND",
"channelUrl": "plugin://plugin.video.youtube/?action=play_video&videoid=McaDpXr1VCo"},
"Delhit Aajtak: {"iconimage":"http://www.lyngsat-logo.com/logo/tv/aa/aaj_tak_delhi.png",
"channelType": "IND",
"channelUrl": "plugin://plugin.video.youtube/?action=play_video&videoid=sGeMtVvkEwc"},
"Speed Records Punjabi": {"iconimage":"https://yt3.ggpht.com/-8R58jy8vKM0/AAAAAAAAAAI/AAAAAAAAAAA/YL3KTjHFUK4/s100-c-k-no/photo.jpg",
"channelType": "IND",
"channelUrl": "plugin://plugin.video.youtube/?action=play_video&videoid=5xdF97V_cWc"},
"92 News": {"iconimage":"http://www.lyngsat-logo.com/logo/tv/num/92_news_pk.png",
"channelType": "PAK",
"channelUrl": "rtsp://37.48.92.233:1935/live/92news_360p"}
}
BASE_WSITE_URL = base64.b64decode('aHR0cDovL3d3dy5kZXNpdHZib3gubWU=')
def check_cache(req_attrib, modelMap):
logging.getLogger().debug('DTB - Check cache ***********************')
logging.getLogger().debug(req_attrib)
refresh_cache = True
context = AddonContext()
filepath = file.resolve_file_path(context.get_addon_data_path(), extraDirPath='data', filename='DTB_Channels.json', makeDirs=True)
refresh = context.get_addon().getSetting('dtbForceRefresh')
if refresh == None or refresh != 'true':
modified_time = file.get_last_modified_time(filepath)
if modified_time is not None:
diff = long((time.time() - modified_time) / 3600)
if diff < 720:
refresh_cache = False
else:
logging.getLogger().debug('DTB_Channels.json was last created 30 days ago, refreshing data.')
else:
logging.getLogger().debug('Request to force refresh.')
modelMap['refresh_cache'] = refresh_cache
modelMap['cache_filepath'] = filepath
def refresh_cache(req_attrib, modelMap):
if not modelMap['refresh_cache']:
return
logging.getLogger().debug('Reloading cache...')
tv_data = {"channels": {"Star Plus":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/ss/star_plus.png",
"channelType": "IND",
"running_tvshows_url": "/star-plus/"},
"Zee TV":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/zz/zee_tv.png",
"channelType": "IND",
"running_tvshows_url": "/zee-tv/"},
"Sony TV":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/ss/set_in.png",
"channelType": "IND",
"running_tvshows_url": "/sony-tv/"},
"Sony Pal":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/ss/sony_pal_in.png",
"channelType": "IND",
"running_tvshows_url": "/sony-pal/"},
"Life OK":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/ll/life_ok_in.png",
"channelType": "IND",
"running_tvshows_url": "/life-ok/"},
"Sahara One":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/ss/sahara_one.png",
"channelType": "IND",
"running_tvshows_url": "/sahara-one/"},
"Colors TV":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/cc/colors_in.png",
"channelType": "IND",
"running_tvshows_url": "/colors-tv/"},
"Sab TV":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/ss/sony_sab_tv.png",
"channelType": "IND",
"running_tvshows_url": "/sab-tv/"},
"&TV":
{"iconimage":"http://akamai.vidz.zeecdn.com/zeedigital/AndTV/domain-data/logo/andtv-logo-pink-1421822560.png",
"channelType": "IND",
"running_tvshows_url": "/and-tv/"},
"MTV":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/mm/mtv_india.png",
"channelType": "IND",
"running_tvshows_url": "/mtv-channel/"},
"Bindass TV":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/uu/utv_bindass.png",
"channelType": "IND",
"running_tvshows_url": "/utv-bindass/"},
"Channel [V]":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/cc/channel_v_in.png",
"channelType": "IND",
"running_tvshows_url": "/channel-v/"},
"Zindagi TV":
{"iconimage":"http://www.lyngsat-logo.com/logo/tv/zz/zee_zindagi_in.png",
"channelType": "IND",
"running_tvshows_url": "/zindagi/"}
}
}
current_index = 0
tv_channels = tv_data['channels']
total_iteration = len(tv_channels)
progress_bar = modelMap['progress_control']
channel_image = modelMap['channel_image_control']
for tv_channel_name, tv_channel in tv_channels.iteritems():
logging.getLogger().debug('About to retrieve tv shows for channel %s' % tv_channel_name)
channel_image.setImage(tv_channel['iconimage'])
channel_image.setVisible(True)
loaded_tv_channel = __retrieve_channel_tv_shows__(tv_channel_name, tv_channel['running_tvshows_url'])
tv_channel["running_tvshows"] = loaded_tv_channel["running_tvshows"]
tv_channel["finished_tvshows"] = loaded_tv_channel["finished_tvshows"]
channel_image.setVisible(False)
current_index = current_index + 1
percent = (current_index * 100) / total_iteration
progress_bar.setPercent(percent)
status = jsonfile.write_file(modelMap['cache_filepath'], tv_data)
if status is not None:
logging.getLogger().debug('Saved status = ' + str(status))
CacheManager().put('dtb_tv_data', tv_data)
AddonContext().get_addon().setSetting('dtbForceRefresh', 'false')
CHANNEL_TYPE_IND = 'IND'
CHANNEL_TYPE_PAK = 'PAK'
def load_channels(req_attrib, modelMap):
logging.getLogger().debug('load channels...')
tv_channels = _read_tv_channels_cache_(modelMap['cache_filepath'])['channels']
tv_channel_items = []
live_tv_channel_items = []
display_channel_type = 1
live_channels_all = {}
live_channels_all.update(LIVE_CHANNELS)
live_filepath = file.resolve_file_path(AddonContext().get_addon_data_path(), extraDirPath='data', filename='Live.json', makeDirs=True)
live_file_channels = _read_live_tv_channels_cache_(live_filepath)
if live_file_channels is not None:
live_channels_all.update(live_file_channels)
channel_names = live_channels_all.keys()
channel_names.sort()
for channel_name in channel_names:
channel_obj = live_channels_all[channel_name]
if((display_channel_type == 1 and channel_obj['channelType'] == CHANNEL_TYPE_IND) or (display_channel_type == 0)):
item = xbmcgui.ListItem(label=channel_name, iconImage=channel_obj['iconimage'], thumbnailImage=channel_obj['iconimage'])
item.setProperty('channel-name', channel_name)
item.setProperty('live-link', 'true')
item.setProperty('direct-link', 'false')
live_tv_channel_items.append(item)
for channel_name in DIRECT_CHANNELS:
channel_obj = DIRECT_CHANNELS[channel_name]
if((display_channel_type == 1 and channel_obj['channelType'] == CHANNEL_TYPE_IND) or (display_channel_type == 0)):
item = xbmcgui.ListItem(label=channel_name, iconImage=channel_obj['iconimage'], thumbnailImage=channel_obj['iconimage'])
item.setProperty('channel-name', channel_name)
item.setProperty('direct-link', 'true')
item.setProperty('live-link', 'false')
tv_channel_items.append(item)
channel_names = tv_channels.keys()
channel_names.sort()
for channel_name in channel_names:
channel_obj = tv_channels[channel_name]
if ((display_channel_type == 1 and channel_obj['channelType'] == CHANNEL_TYPE_IND)
or (display_channel_type == 2 and channel_obj['channelType'] == CHANNEL_TYPE_PAK)
or (display_channel_type == 0)):
item = xbmcgui.ListItem(label=channel_name, iconImage=channel_obj['iconimage'], thumbnailImage=channel_obj['iconimage'])
item.setProperty('channel-name', channel_name)
item.setProperty('direct-link', 'false')
item.setProperty('live-link', 'false')
tv_channel_items.append(item)
modelMap['tv_channel_items'] = tv_channel_items
modelMap['live_tv_channel_items'] = live_tv_channel_items
def load_favorite_tv_shows(req_attrib, modelMap):
context = AddonContext()
filepath = file.resolve_file_path(context.get_addon_data_path(), extraDirPath='data', filename='DTB_Favorites.json', makeDirs=False)
logging.getLogger().debug('loading favorite tv shows from file : %s' % filepath)
favorite_tv_shows = _read_favorite_tv_shows_cache_(filepath)
if favorite_tv_shows is None:
return
favorite_tv_shows_items = []
tv_show_names = favorite_tv_shows.keys()
tv_show_names.sort()
for tv_show_name in tv_show_names:
favorite_tv_show = favorite_tv_shows[tv_show_name]
item = xbmcgui.ListItem(label=tv_show_name, iconImage=favorite_tv_show['tv-show-thumb'], thumbnailImage=favorite_tv_show['tv-show-thumb'])
item.setProperty('channel-type', favorite_tv_show['channel-type'])
item.setProperty('channel-name', favorite_tv_show['channel-name'])
item.setProperty('tv-show-name', tv_show_name)
item.setProperty('tv-show-url', favorite_tv_show['tv-show-url'])
item.setProperty('tv-show-thumb', favorite_tv_show['tv-show-thumb'])
favorite_tv_shows_items.append(item)
modelMap['favorite_tv_shows_items'] = favorite_tv_shows_items
def determine_direct_tv_channel(req_attrib, modelMap):
if(req_attrib['direct-link'] == 'true'):
logging.getLogger().debug('found direct channel redirect...')
return 'redirect:dr-displayDirectChannelEpisodesList'
def determine_live_tv_channel(req_attrib, modelMap):
if(req_attrib['live-link'] == 'true'):
logging.getLogger().debug('found live channel redirect...')
return 'redirect:dtb-watchLiveChannel'
def load_tv_shows(req_attrib, modelMap):
logging.getLogger().debug('load tv shows...')
tv_channels = CacheManager().get('dtb_tv_data')['channels']
channel_name = req_attrib['channel-name']
tv_channel = tv_channels[channel_name]
channel_type = tv_channel['channelType']
modelMap['channel_image'] = tv_channel['iconimage']
modelMap['channel_name'] = channel_name
selected_tv_show_name = ''
if req_attrib.has_key('tv-show-name'):
selected_tv_show_name = req_attrib['tv-show-name']
tv_show_items = []
index = 0
if tv_channel.has_key('running_tvshows'):
tv_shows = tv_channel['running_tvshows']
logging.getLogger().debug('total tv shows to be displayed: %s' % str(len(tv_shows)))
index = _prepare_tv_show_items_(tv_shows, channel_type, channel_name, selected_tv_show_name, tv_show_items, False, modelMap, index)
hideFinishedShow = AddonContext().get_addon().getSetting('drHideFinished')
if tv_channel.has_key('finished_tvshows') and hideFinishedShow is not None and hideFinishedShow == 'false':
tv_shows = tv_channel["finished_tvshows"]
        logging.getLogger().debug('total finished tv shows to be displayed: %s' % str(len(tv_shows)))
index = _prepare_tv_show_items_(tv_shows, channel_type, channel_name, selected_tv_show_name, tv_show_items, True, modelMap, index)
modelMap['tv_show_items'] = tv_show_items
def load_direct_link_channel(req_attrib, modelMap):
channel_name = req_attrib['channel-name']
tv_channel = DIRECT_CHANNELS[channel_name]
modelMap['channel_image'] = tv_channel['iconimage']
modelMap['channel_name'] = channel_name
req_attrib['tv-show-url'] = BASE_WSITE_URL + tv_channel['tvshow_episodes_url']
req_attrib['tv-show-name'] = ''
req_attrib['channel-type'] = tv_channel['channelType']
def re_me(data, re_patten):
match = ''
m = re.search(re_patten, data)
if m != None:
match = m.group(1)
else:
match = ''
return match
def watch_live(req_attrib, modelMap):
channel_name = req_attrib['channel-name']
live_filepath = file.resolve_file_path(AddonContext().get_addon_data_path(), extraDirPath='data', filename='Live.json', makeDirs=True)
live_file_channels = _read_live_tv_channels_cache_(live_filepath)
tv_channel = None
if LIVE_CHANNELS.has_key(channel_name):
tv_channel = LIVE_CHANNELS[channel_name]
if live_file_channels is not None and live_file_channels.has_key(channel_name):
tv_channel = live_file_channels[channel_name]
item = xbmcgui.ListItem(label=channel_name, iconImage=tv_channel['iconimage'], thumbnailImage=tv_channel['iconimage'])
item.setProperty('streamLink', tv_channel['channelUrl'])
modelMap['live_item'] = item
def _prepare_tv_show_items_(tv_shows, channel_type, channel_name, selected_tv_show_name, tv_show_items, is_finished_shows, modelMap, index):
tv_shows.sort()
for tv_show in tv_shows:
name = tv_show['name']
if is_finished_shows:
name = name + ' [COLOR gray]finished[/COLOR]'
item = xbmcgui.ListItem(label=name)
item.setProperty('channel-type', channel_type)
item.setProperty('channel-name', channel_name)
item.setProperty('tv-show-name', name)
if is_finished_shows:
item.setProperty('tv-show-finished', 'true')
else:
item.setProperty('tv-show-finished', 'false')
item.setProperty('tv-show-url', tv_show['url'])
tv_show_items.append(item)
if selected_tv_show_name == name:
modelMap['selected_tv_show_item'] = index
index = index + 1
return index
def empty_function(req_attrib, modelMap):
return
def add_tv_show_favorite(req_attrib, modelMap):
logging.getLogger().debug('add tv show favorite...')
tv_show_url = req_attrib['tv-show-url']
tv_show_name = req_attrib['tv-show-name']
tv_show_thumb = req_attrib['tv-show-thumb']
channel_type = req_attrib['channel-type']
channel_name = req_attrib['channel-name']
logging.getLogger().debug('add tv show favorite...' + tv_show_url)
favorites = CacheManager().get('dtb_tv_favorites')
if favorites is None:
favorites = {}
elif favorites.has_key(tv_show_name):
favorites.pop(tv_show_name)
favorites[tv_show_name] = {'tv-show-name':tv_show_name, 'tv-show-thumb':tv_show_thumb, 'tv-show-url':tv_show_url, 'channel-name':channel_name, 'channel-type':channel_type}
context = AddonContext()
filepath = file.resolve_file_path(context.get_addon_data_path(), extraDirPath='data', filename='DTB_Favorites.json', makeDirs=False)
logging.getLogger().debug(favorites)
_write_favorite_tv_shows_cache_(filepath, favorites)
notification = "XBMC.Notification(%s,%s,%s,%s)" % (tv_show_name, 'ADDED TO FAVORITES', 2500, tv_show_thumb)
xbmc.executebuiltin(notification)
def load_remove_tv_show_favorite(req_attrib, modelMap):
logging.getLogger().debug('load remove tv show favorite...')
modelMap['tv-show-name'] = req_attrib['tv-show-name']
modelMap['tv-show-thumb'] = req_attrib['tv-show-thumb']
logging.getLogger().debug('display remove tv show favorite...')
def remove_favorite(req_attrib, modelMap):
logging.getLogger().debug('remove tv show favorite...')
favorite = CacheManager().get('dtb_selected_favorite')
favorite_thumb = CacheManager().get('dtb_selected_favorite_thumb')
favorites = CacheManager().get('dtb_tv_favorites')
if favorites is None:
favorites = {}
elif favorites.has_key(favorite):
favorites.pop(favorite)
context = AddonContext()
filepath = file.resolve_file_path(context.get_addon_data_path(), extraDirPath='data', filename='DTB_Favorites.json', makeDirs=False)
logging.getLogger().debug(favorites)
_write_favorite_tv_shows_cache_(filepath, favorites)
notification = "XBMC.Notification(%s,%s,%s,%s)" % (favorite, 'REMOVED FAVORITE', 2500, favorite_thumb)
xbmc.executebuiltin(notification)
modelMap['reload_favorite_tv_shows_items'] = True
if len(favorites) > 0:
favorite_tv_shows_items = []
for tv_show_name in favorites:
favorite_tv_show = favorites[tv_show_name]
item = xbmcgui.ListItem(label=tv_show_name, iconImage=favorite_tv_show['tv-show-thumb'], thumbnailImage=favorite_tv_show['tv-show-thumb'])
item.setProperty('channel-type', favorite_tv_show['channel-type'])
item.setProperty('channel-name', favorite_tv_show['channel-name'])
item.setProperty('tv-show-name', tv_show_name)
item.setProperty('tv-show-url', favorite_tv_show['tv-show-url'])
item.setProperty('tv-show-thumb', favorite_tv_show['tv-show-thumb'])
favorite_tv_shows_items.append(item)
modelMap['favorite_tv_shows_items'] = favorite_tv_shows_items
def load_tv_show_episodes(req_attrib, modelMap):
logging.getLogger().debug('load tv show episodes...')
url = req_attrib['tv-show-url']
tv_show_url = req_attrib['tv-show-url']
tv_show_name = req_attrib['tv-show-name']
channel_type = req_attrib['channel-type']
channel_name = req_attrib['channel-name']
currentPage = 1
if req_attrib.has_key('tv-show-page') and req_attrib['tv-show-page'] != '':
currentPage = int(req_attrib['tv-show-page'])
if currentPage != 1:
url = url + 'page/' + req_attrib['tv-show-page'] + '/'
logging.getLogger().debug('load tv show episodes...' + url)
contentDiv = BeautifulSoup.SoupStrainer('div', {'id':'left-div'})
soup = HttpClient().get_beautiful_soup(url=url + '?tag=video', parseOnlyThese=contentDiv)
# soup = BeautifulSoup.BeautifulSoup(HttpClient().get_html_content(url=url)).findAll('div', {'id':'contentBody'})[0]
tv_show_episode_items = []
threads = soup.findAll('h2', {'class':'titles'})
tv_show_episode_items.extend(__retrieveTVShowEpisodes__(threads, tv_show_name, channel_type, channel_name))
logging.getLogger().debug('In DTB: total tv show episodes: %s' % str(len(tv_show_episode_items)))
pagesDiv = soup.findChild('p', {'class':'pagination'})
if pagesDiv is not None:
pagesInfoTags = pagesDiv.findAllNext('a')
for pagesInfoTag in pagesInfoTags:
logging.getLogger().debug(pagesInfoTag)
pageInfo = re.compile('page/(.+?)/').findall(pagesInfoTag['href'])
if len(pageInfo) > 0:
if re.search('Old', pagesInfoTag.getText(), re.IGNORECASE):
item = xbmcgui.ListItem(label='<< Older Entries')
elif re.search('Next', pagesInfoTag.getText(), re.IGNORECASE):
item = xbmcgui.ListItem(label='Next Entries >>')
item.setProperty('tv-show-page', pageInfo[0][0])
item.setProperty('channel-type', channel_type)
item.setProperty('channel-name', channel_name)
item.setProperty('tv-show-name', tv_show_name)
item.setProperty('tv-show-url', tv_show_url)
tv_show_episode_items.append(item)
else:
item = xbmcgui.ListItem(label='Newest Entries >>')
item.setProperty('tv-show-page', '1')
item.setProperty('channel-type', channel_type)
item.setProperty('channel-name', channel_name)
item.setProperty('tv-show-name', tv_show_name)
item.setProperty('tv-show-url', tv_show_url)
tv_show_episode_items.append(item)
modelMap['tv_show_episode_items'] = tv_show_episode_items
def __retrieveTVShowEpisodes__(threads, tv_show_name, channel_type, channel_name):
tv_show_episode_items = []
logging.getLogger().debug(threads)
if threads is None:
return []
for thread in threads:
aTag = thread.findNext('a')
episodeName = aTag.getText()
titleInfo = http.unescape(episodeName)
titleInfo = titleInfo.replace(tv_show_name, '')
titleInfo = titleInfo.replace('Full Episode Watch Online', '')
titleInfo = titleInfo.replace('Watch Online', '')
titleInfo = titleInfo.strip()
item = xbmcgui.ListItem(label=titleInfo)
episode_url = str(aTag['href'])
if not episode_url.lower().startswith(BASE_WSITE_URL):
if episode_url[0] != '/':
episode_url = '/' + episode_url
episode_url = BASE_WSITE_URL + episode_url
item.setProperty('tv-show-name', tv_show_name)
item.setProperty('channel-type', channel_type)
item.setProperty('channel-name', channel_name)
item.setProperty('episode-name', titleInfo)
item.setProperty('episode-url', episode_url)
tv_show_episode_items.append(item)
return tv_show_episode_items
def determine_tv_show_episode_videos(req_attrib, modelMap):
logging.getLogger().debug('determine tv show episode videos...')
if req_attrib['episode-url'] is None or req_attrib['episode-url'] == '':
return 'redirect:dtb-displayShowEpisodesList'
def load_tv_show_episode_videos(req_attrib, modelMap):
logging.getLogger().debug('load tv show episode videos...')
list_items = _retrieve_video_links_(req_attrib, modelMap)
    '''The following block implements the Smart Direct Play feature.'''
playNowItem = __findPlayNowStream__(list_items)
logging.getLogger().debug('found play now stream... ')
modelMap['selected-playlist-item'] = playNowItem['selected']
modelMap['backup-playlist-item'] = playNowItem['backup']
def load_tv_show_episode_videos_list(req_attrib, modelMap):
logging.getLogger().debug('load tv show episode videos list...')
list_items = _retrieve_video_links_(req_attrib, modelMap)
modelMap['videos-item-list'] = list_items
def load_selected_playlist_streams(req_attrib, modelMap):
selected_playlist_item = modelMap['selected-playlist-item']
video_items = None
if selected_playlist_item is not None:
selected_playlist = selected_playlist_item.getProperty('videoPlayListItemsKey')
logging.getLogger().debug('load selected playlist streams... %s' % selected_playlist)
playlist_items = modelMap[selected_playlist]
try:
video_items = _retrieve_playlist_streams_(modelMap['progress_control'], playlist_items)
except:
modelMap['progress_control'].setPercent(0)
pass
if video_items is None:
backup_playlist_item = modelMap['backup-playlist-item']
backup_playlist = backup_playlist_item.getProperty('videoPlayListItemsKey')
logging.getLogger().debug('load backup playlist streams... %s' % backup_playlist)
playlist_items = modelMap[backup_playlist]
video_items = _retrieve_playlist_streams_(modelMap['progress_control'], playlist_items)
modelMap['video_streams'] = video_items
def _retrieve_playlist_streams_(progress_bar, playlist_items):
lazyLoadStream = AddonContext().get_addon().getSetting('drLazyLoadStream')
current_index = 1
total_iteration = len(playlist_items)
video_items = []
for item in playlist_items:
logging.getLogger().debug('About to retrieve video link %s' % item)
video_item = None
if lazyLoadStream is None or lazyLoadStream == 'false':
video_item = SnapVideo().resolveVideoStream(item['videoLink'])
else:
video_item = _create_video_stream_item(item['videoLink'], str(current_index))
video_items.append(video_item)
percent = (current_index * 100) / total_iteration
progress_bar.setPercent(percent)
current_index = current_index + 1
return video_items
def load_selected_video_playlist_streams(req_attrib, modelMap):
progress_bar = req_attrib['progress_control']
progress_bar.setPercent(0)
video_items = None
if req_attrib['is-playlist'] == 'true':
playlist_items = pickle.loads(req_attrib['videos'])
video_items = _retrieve_playlist_streams_(progress_bar, playlist_items)
else:
video_items = []
video_item = SnapVideo().resolveVideoStream(req_attrib['video-link'])
video_items.append(video_item)
progress_bar.setPercent(100)
modelMap['video_streams'] = video_items
def _create_video_stream_item(videoLink, inx=''):
videoHostingInfo = SnapVideo().findVideoHostingInfo(videoLink)
label = videoHostingInfo.get_name() + inx
item = xbmcgui.ListItem(label=label, iconImage=videoHostingInfo.get_icon(), thumbnailImage=videoHostingInfo.get_icon())
item.setProperty('streamLink', 'plugin://plugin.video.tvondesizonexl/?videoLink=' + urllib.quote_plus(videoLink))
return item
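# The cache helpers below follow a cache-aside pattern: check the in-memory
# CacheManager first, fall back to the JSON file on disk, and prime the cache
# with whatever was read.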
def _read_tv_channels_cache_(filepath):
tv_data = CacheManager().get('dtb_tv_data')
if tv_data is None:
tv_data = jsonfile.read_file(filepath)
CacheManager().put('dtb_tv_data', tv_data)
return tv_data
def _read_live_tv_channels_cache_(filepath):
live_tv_data = CacheManager().get('live_tv_data')
if live_tv_data is None:
live_tv_data = jsonfile.read_file(filepath)
CacheManager().put('live_tv_data', live_tv_data)
return live_tv_data
def _read_favorite_tv_shows_cache_(filepath):
favorites = CacheManager().get('dtb_tv_favorites')
if favorites is None:
favorites = jsonfile.read_file(filepath)
CacheManager().put('dtb_tv_favorites', favorites)
return favorites
def _write_favorite_tv_shows_cache_(filepath, data):
CacheManager().put('dtb_tv_favorites', data)
jsonfile.write_file(filepath, data)
def __retrieve_tv_shows__(tv_channel_url):
tv_channel = {}
tv_channel["running_tvshows"] = []
tv_channel["finished_tvshows"] = []
logging.getLogger().debug('TV Channel URL: ' + tv_channel_url)
tv_shows = tv_channel["running_tvshows"]
if tv_channel_url is None:
return tv_shows
tv_channel_url = BASE_WSITE_URL + tv_channel_url
logging.getLogger().debug(tv_channel_url)
contentDiv = BeautifulSoup.SoupStrainer('li', {'class':'categories'})
soup = HttpClient().get_beautiful_soup(url=tv_channel_url, parseOnlyThese=contentDiv)
# soup = BeautifulSoup.BeautifulSoup(HttpClient().get_html_content(url=tv_channel_url)).findAll('div', {'id':'forumbits', 'class':'forumbits'})[0]
for title_tag in soup.findAll('li'):
aTag = title_tag.findNext('a')
tv_show_url = str(aTag['href'])
if tv_show_url[0:4] != "http":
tv_show_url = BASE_WSITE_URL + '/' + tv_show_url
tv_show_name = aTag.getText()
if not re.search('Completed Shows', tv_show_name, re.IGNORECASE):
tv_shows.append({"name":http.unescape(tv_show_name), "url":tv_show_url, "iconimage":""})
else:
tv_shows = tv_channel["finished_tvshows"]
return tv_channel
def __retrieve_channel_tv_shows__(tv_channel_name, tv_channel_url):
tv_channel = {}
try:
tv_channel = __retrieve_tv_shows__(tv_channel_url)
if(len(tv_channel["running_tvshows"]) == 0):
tv_channel["running_tvshows"].append({"name":"ERROR: UNABLE TO LOAD. Share message on http://forum.xbmc.org/showthread.php?tid=115583", "url":BASE_WSITE_URL + tv_channel["running_tvshows_url"]})
except Exception, e:
logging.getLogger().exception(e)
logging.getLogger().debug('Failed to load a channel <%s>. continue retrieval of next tv show' % tv_channel_name)
return tv_channel
def _retrieve_video_links_(req_attrib, modelMap):
modelMap['channel-name'] = req_attrib['channel-name']
modelMap['tv-show-name'] = req_attrib['tv-show-name']
modelMap['episode-name'] = req_attrib['episode-name']
video_source_id = 1
video_source_img = None
video_source_name = None
video_part_index = 0
video_playlist_items = []
ignoreAllLinks = False
list_items = []
contentDiv = BeautifulSoup.SoupStrainer('div', {'id':'left-div'})
soup = HttpClient().get_beautiful_soup(url=req_attrib['episode-url'], parseOnlyThese=contentDiv)
# soup = BeautifulSoup.BeautifulSoup(HttpClient().get_html_content(url=req_attrib['episode-url'])).findAll('blockquote', {'class':re.compile(r'\bpostcontent\b')})[0]
centerTag = soup.findNext('center')
logging.getLogger().debug(centerTag)
prevChild = ''
prevAFont = None
isHD = 'false'
videoSource = ''
for child in soup.findChildren():
if child.name == 'span':
if len(video_playlist_items) > 0:
list_items.append(__preparePlayListItem__(video_source_id, video_source_img, video_source_name, video_playlist_items, modelMap, isHD))
logging.getLogger().debug(videoSource)
videoSource = child.getText()
if(re.search('720p', videoSource, re.I)):
isHD = 'true'
else:
isHD = 'false'
if video_source_img is not None:
video_source_id = video_source_id + 1
video_source_img = None
video_source_name = None
video_part_index = 0
video_playlist_items = []
ignoreAllLinks = False
elif not ignoreAllLinks and child.name == 'a':
if (str(child['href']) != 'https://www.facebook.com/iamdesirulez'):
video_part_index = video_part_index + 1
video_link = {}
video_link['videoTitle'] = 'Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index) + ' | ' + child.getText()
video_link['videoLink'] = str(child['href'])
video_link['videoSource'] = videoSource
try:
try:
__prepareVideoLink__(video_link)
except Exception, e:
logging.getLogger().error(e)
video_hosting_info = SnapVideo().findVideoHostingInfo(video_link['videoLink'])
if video_hosting_info is None or video_hosting_info.get_name() == 'UrlResolver by t0mm0':
raise
video_link['videoSourceImg'] = video_hosting_info.get_icon()
video_link['videoSourceName'] = video_hosting_info.get_name()
video_playlist_items.append(video_link)
video_source_img = video_link['videoSourceImg']
video_source_name = video_link['videoSourceName']
item = xbmcgui.ListItem(label='Source #' + str(video_source_id) + ' | ' + 'Part #' + str(video_part_index) , iconImage=video_source_img, thumbnailImage=video_source_img)
item.setProperty('videoLink', video_link['videoLink'])
item.setProperty('videoTitle', video_link['videoTitle'])
item.setProperty('videoSourceName', video_source_name)
item.setProperty('isContinuousPlayItem', 'false')
list_items.append(item)
prevAFont = child.findChild('font')
except:
logging.getLogger().error('Unable to recognize a source = ' + str(video_link['videoLink']))
video_source_img = None
video_source_name = None
video_part_index = 0
video_playlist_items = []
ignoreAllLinks = True
prevAFont = None
prevChild = child.name
if len(video_playlist_items) > 0:
list_items.append(__preparePlayListItem__(video_source_id, video_source_img, video_source_name, video_playlist_items, modelMap, isHD))
return list_items
def __preparePlayListItem__(video_source_id, video_source_img, video_source_name, video_playlist_items, modelMap, isHD):
item = xbmcgui.ListItem(label='[B]Continuous Play[/B]' + ' | ' + 'Source #' + str(video_source_id) + ' | ' + 'Parts = ' + str(len(video_playlist_items)) , iconImage=video_source_img, thumbnailImage=video_source_img)
item.setProperty('videoSourceName', video_source_name)
item.setProperty('isContinuousPlayItem', 'true')
item.setProperty('isHD', isHD)
item.setProperty('videoPlayListItemsKey', 'playlist#' + str(video_source_id))
item.setProperty('videosList', pickle.dumps(video_playlist_items))
modelMap['playlist#' + str(video_source_id)] = video_playlist_items
return item
def __prepareVideoLink__(video_link):
logging.getLogger().debug(video_link)
video_url = video_link['videoLink']
video_source = video_link['videoSource']
new_video_url = None
if re.search('videos.desihome.info', video_url, flags=re.I):
new_video_url = __parseDesiHomeUrl__(video_url)
if new_video_url is None:
video_id = re.compile('(id|url|v|si|sim)=(.+?)/').findall(video_url + '/')[0][1]
if re.search('dm(\d*).php', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and not video_id.isdigit() and re.search('dailymotion', video_source, flags=re.I)):
new_video_url = 'http://www.dailymotion.com/video/' + video_id + '_'
elif re.search('(flash.php|fp.php|wire.php|pw.php)', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and video_id.isdigit() and re.search('flash', video_source, flags=re.I)):
new_video_url = 'http://config.playwire.com/videos/v2/' + video_id + '/player.json'
elif re.search('(youtube|u|yt)(\d*).php', video_url, flags=re.I):
new_video_url = 'http://www.youtube.com/watch?v=' + video_id + '&'
elif re.search('mega.co.nz', video_url, flags=re.I):
new_video_url = video_url
elif re.search('(put|pl).php', video_url, flags=re.I):
new_video_url = 'http://www.putlocker.com/file/' + video_id
elif re.search('(cl|cloud).php', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and not video_id.isdigit() and re.search('cloudy', video_source, flags=re.I)):
new_video_url = 'https://www.cloudy.ec/embed.php?id=' + str(video_id)
elif re.search('videohut.php', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and not video_id.isdigit() and re.search('video hut', video_source, flags=re.I)):
new_video_url = 'http://www.videohut.to/embed.php?id=' + video_id
elif re.search('letwatch.php', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and not video_id.isdigit() and re.search('letwatch', video_source, flags=re.I)):
new_video_url = 'http://letwatch.us/embed-' + str(video_id) + '-620x496.html'
elif re.search('videosky.php', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and not video_id.isdigit() and re.search('video sky', video_source, flags=re.I)):
new_video_url = 'http://www.videosky.to/embed.php?id=' + str(video_id)
elif re.search('(weed.php|vw.php)', video_url, flags=re.I):
new_video_url = 'http://www.videoweed.es/file/' + video_id
elif re.search('(sockshare.com|sock.com)', video_url, flags=re.I):
new_video_url = video_url
elif re.search('divxstage.php', video_url, flags=re.I):
new_video_url = 'divxstage.eu/video/' + video_id + '&'
elif re.search('(hostingbulk|hb).php', video_url, flags=re.I):
new_video_url = 'hostingbulk.com/' + video_id + '&'
elif re.search('(movshare|ms).php', video_url, flags=re.I):
new_video_url = 'movshare.net/video/' + video_id + '&'
elif re.search('mz.php', video_url, flags=re.I):
new_video_url = 'movzap.com/' + video_id + '&'
elif re.search('nv.php', video_url, flags=re.I):
new_video_url = 'nowvideo.ch/embed.php?v=' + video_id + '&'
elif re.search('nm.php', video_url, flags=re.I):
new_video_url = 'novamov.com/video/' + video_id + '&'
elif re.search('tune.php', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and video_id.isdigit() and re.search('tune.pk', video_source, flags=re.I)):
new_video_url = 'tune.pk/play/' + video_id + '&'
elif re.search('vshare.php', video_url, flags=re.I):
new_video_url = 'http://vshare.io/d/' + video_id + '&'
elif re.search('vidto.php', video_url, flags=re.I):
new_video_url = 'http://vidto.me/' + video_id + '.html'
elif re.search('videotanker.php', video_url, flags=re.I) or ((re.search('([a-z]*).tv/', video_url, flags=re.I) or re.search('([a-z]*).net/', video_url, flags=re.I) or re.search('([a-z]*).com/', video_url, flags=re.I) or re.search('([a-z]*).me/', video_url, flags=re.I)) and video_id.isdigit() and re.search('video tanker', video_source, flags=re.I)):
new_video_url = 'http://videotanker.co/player/embed_player.php?vid=' + video_id + '&'
video_hosting_info = SnapVideo().findVideoHostingInfo(new_video_url)
video_link['videoLink'] = new_video_url
video_link['videoSourceImg'] = video_hosting_info.get_icon()
video_link['videoSourceName'] = video_hosting_info.get_name()
def __parseDesiHomeUrl__(video_url):
video_link = None
logging.getLogger().debug('video_url = ' + video_url)
html = HttpClient().get_html_content(url=video_url)
if re.search('dailymotion.com', html, flags=re.I):
video_link = 'http://www.dailymotion.com/' + re.compile('dailymotion.com/(.+?)"').findall(html)[0] + '&'
elif re.search('hostingbulk.com', html, flags=re.I):
video_link = 'http://hostingbulk.com/' + re.compile('hostingbulk.com/(.+?)"').findall(html)[0] + '&'
elif re.search('movzap.com', html, flags=re.I):
video_link = 'http://movzap.com/' + re.compile('movzap.com/(.+?)"').findall(html)[0] + '&'
return video_link
PREFERRED_DIRECT_PLAY_ORDER = [Dailymotion.VIDEO_HOSTING_NAME, LetWatch.VIDEO_HOST_NAME, CloudEC.VIDEO_HOST_NAME, VideoWeed.VIDEO_HOST_NAME, Tune_pk.VIDEO_HOSTING_NAME, YouTube.VIDEO_HOSTING_NAME, Nowvideo.VIDEO_HOST_NAME, Novamov.VIDEO_HOST_NAME]
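# Scans the continuous-play items and selects the one whose source appears earliest
# in PREFERRED_DIRECT_PLAY_ORDER, keeping a CloudEC/VideoWeed (or otherwise
# unlisted) source as a backup; returns a dict with 'selected' and 'backup' entries.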
def __findPlayNowStream__(new_items):
# if AddonContext().get_addon().getSetting('autoplayback') == 'false':
# return None
logging.getLogger().debug('FINDING the source..')
selectedIndex = None
selectedSource = None
hdSelected = False
backupSource = None
backupSourceName = None
for item in new_items:
if item.getProperty('isContinuousPlayItem') == 'true':
source_name = item.getProperty('videoSourceName')
try:
logging.getLogger().debug(source_name)
preference = PREFERRED_DIRECT_PLAY_ORDER.index(item.getProperty('videoSourceName'))
if preference == 0 and (selectedIndex is None or selectedIndex != 0) and not hdSelected :
selectedSource = item
selectedIndex = 0
elif selectedIndex is None or selectedIndex > preference:
selectedSource = item
selectedIndex = preference
if item.getProperty('isHD') == 'true' and selectedIndex is not None:
hdSelected = True
if ((source_name == CloudEC.VIDEO_HOST_NAME or source_name == VideoWeed.VIDEO_HOST_NAME) and backupSource is None):
logging.getLogger().debug("Added to backup plan: %s" % source_name)
backupSource = item
backupSourceName = source_name
except ValueError:
logging.getLogger().debug("Exception for source : %s" % source_name)
if source_name == CloudEC.VIDEO_HOST_NAME and (backupSource is None or backupSourceName != CloudEC.VIDEO_HOST_NAME):
logging.getLogger().debug("Added to backup plan: %s" % source_name)
backupSource = item
backupSourceName = source_name
elif backupSource is None:
logging.getLogger().debug("Added to backup plan when Playwire not found: %s" % source_name)
backupSource = item
backupSourceName = source_name
continue
sources = {}
sources['selected'] = selectedSource
sources['backup'] = backupSource
return sources
| JRepoInd/Repo_Indi | plugin.video.tvondesizonexl/tvshows/dtb_actions.py | Python | gpl-2.0 | 46,447 |
# python3
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example call to Google's speech-to-text API to transcribe Localized Narrative recordings.
Pre-requisites:
- Set up Google's API authentication:
https://cloud.google.com/docs/authentication/getting-started
- Install dependencies:
+ pip install ffmpeg
+ pip install pydub
+ pip install google-cloud-speech
Comments:
- Google's speech-to-text API does not support the Vorbis encoding in which the
Localized Narrative recordings were released. We therefore need to transcode
them to Opus, which is supported. We do this in `convert_recording`.
- Transcription is limited to 60 seconds if loaded from a local file. For audio
longer than 1 minute, we need to upload the file to a GCS bucket and load the
audio using its URI: `audio = speech.RecognitionAudio(uri=gcs_uri)`.
"""
import io
import os
from google.cloud import speech
import pydub
def convert_recording(input_file, output_file):
with open(input_file, 'rb') as f:
recording = pydub.AudioSegment.from_file(f, codec='libvorbis')
with open(output_file, 'wb') as f:
recording.export(f, format='ogg', codec='libopus')
def speech_to_text(recording_file):
# Loads from local file. If longer than 60 seconds, upload to GCS and use
# `audio = speech.RecognitionAudio(uri=gcs_uri)`
with io.open(recording_file, 'rb') as audio_file:
content = audio_file.read()
audio = speech.RecognitionAudio(content=content)
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.OGG_OPUS,
sample_rate_hertz=48000,
audio_channel_count=2,
max_alternatives=10,
enable_word_time_offsets=True,
language_code='en-IN')
client = speech.SpeechClient()
operation = client.long_running_recognize(config=config, audio=audio)
return operation.result(timeout=90)
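# A hedged sketch (not part of the original example): for audio longer than ~60
# seconds, the recording must be referenced by its GCS URI instead of inline
# content. The URI passed in is assumed to point at an Opus-in-OGG file that has
# already been uploaded to a bucket.
def speech_to_text_from_gcs(gcs_uri):
  audio = speech.RecognitionAudio(uri=gcs_uri)
  config = speech.RecognitionConfig(
      encoding=speech.RecognitionConfig.AudioEncoding.OGG_OPUS,
      sample_rate_hertz=48000,
      audio_channel_count=2,
      max_alternatives=10,
      enable_word_time_offsets=True,
      language_code='en-IN')
  client = speech.SpeechClient()
  operation = client.long_running_recognize(config=config, audio=audio)
  # Longer recordings may need a larger timeout than the local-file example.
  return operation.result(timeout=300)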
if __name__ == '__main__':
# Input encoded in Vorbis in an OGG container.
input_recording = '/Users/jponttuset/Downloads/coco_val_137576_93.ogg'
basename, extension = os.path.splitext(input_recording)
output_recording = f'{basename}_opus{extension}'
# Re-encodes in Opus and saves to file.
convert_recording(input_recording, output_recording)
# Actual call to Google's speech-to-text API.
result = speech_to_text(output_recording)
print(result)
| google/localized-narratives | transcription_example.py | Python | apache-2.0 | 2,874 |
from __future__ import print_function
from django.core.management.base import BaseCommand
from django.db import transaction
import pandas as pd
import math
from sa_api_v2.models import (
Tag,
PlaceTag,
Place,
DataSet
)
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
TAG_MAPPINGS = {
"Remove-above cost": "above costs",
"Removed-cost above": "above costs",
"Removed-Cost above": "above costs",
"Remove- above cost": "above costs",
"Vetted": "Vetted",
"Vettted": "Vetted",
"vetted": "Vetted",
"Remove-illegal": "illegal",
"Remove-programmatic": "programmatic",
"Remove- programmatic": "programmatic",
"Remove-separate process": "programmatic",
"Remove-incomplete": "incomplete",
"Remove-county function": "county function",
"Remove-private": "private",
}
# 1. Create the tags on our pbdurham dataset
TAGS = [
{
"name": "Removed",
"is_enabled": False,
"children": [
{
"name": "above costs",
"color": "#c9302c"
},
{
"name": "illegal",
"color": "#c9302c"
},
{
"name": "programmatic",
"color": "#c9302c"
},
{
"name": "separate process",
"color": "#c9302c"
},
{
"name": "incomplete",
"color": "#c9302c"
},
{
"name": "county function",
"color": "#c9302c"
},
{
"name": "private",
"color": "#c9302c"
},
{
"name": "not in durham",
"color": "#c9302c"
},
]
},
{
"name": "Vetted",
"color": "#449d44"
}
]
# 2. parse the csv
FILEPATH = "./prevet-tags.csv"
# 3. get the tag name from the "Pre-Vetting Status" column
# find the tag using the tag name and TAG_MAPPINGS
# 4. get the place id from the "Mapseed ID" column
# 5. create a PlaceTag, Tag to the Place model
def create_tags():
dataset = DataSet.objects.get(display_name="pbdurham")
def create_tag(tag, parent):
is_enabled = False if tag.get("is_enabled") is False else True
color = tag.get("color", None)
tagModel = Tag.objects.create(
name=tag["name"],
color=color,
parent=parent,
is_enabled=is_enabled,
dataset=dataset
)
logger.info("creating tag: {}".format(tagModel))
for child in [tag for tag in tag.get('children', [])]:
create_tag(child, tagModel)
for tag in TAGS:
create_tag(tag, None)
def create_place_tags():
df = pd.read_csv(FILEPATH)
ideas_not_vetted = []
for index, row in df.iterrows():
# get the relevant place:
if math.isnan(row['Mapseed ID']):
logger.info("row had invalid id: {}".format(row))
continue
mapseed_id = int(row['Mapseed ID'])
logger.info("parsing mapseed id: {}".format(mapseed_id))
if mapseed_id is None:
import ipdb
ipdb.set_trace()
if type(row['Pre-Vetting Status ']) == float and math.isnan(row['Pre-Vetting Status ']):
logger.info("row has invalid prevet status: {}".format(row))
ideas_not_vetted.append(mapseed_id)
continue
status = row['Pre-Vetting Status '].strip()
tag_name = TAG_MAPPINGS.get(status, None)
logger.info("tag name: {}".format(tag_name))
if tag_name is None:
logger.info("no tag name for place id: {}".format(mapseed_id))
raise ValueError("no tag mapping for prevet status: {}". format(status))
continue
# get the relevant tag:
tag = Tag.objects.get(name=tag_name)
logger.info("Tag: {}".format(tag))
place = Place.objects.get(id=mapseed_id)
logger.info("place: {}".format(tag))
PlaceTag.objects.create(tag=tag, place=place)
logger.info("ideas not vetted: {}".format(ideas_not_vetted))
class Command(BaseCommand):
help = """
Ingest PlaceTags from a spreadsheet into our app.
"""
def handle(self, *args, **options):
with transaction.atomic():
create_tags()
create_place_tags()
| smartercleanup/api | src/sa_api_v2/management/commands/ingestPrevetTags.py | Python | gpl-3.0 | 4,460 |
#-------------------------------------------------------------------------------
# Copyright (c) 2014 Proxima Centauri srl <[email protected]>.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the GNU Public License v3.0
# which accompanies this distribution, and is available at
# http://www.gnu.org/licenses/gpl.html
#
# Contributors:
# Proxima Centauri srl <[email protected]> - bug fixes
# Peter Kropf (http://www.owfs.org) - original implementation
#-------------------------------------------------------------------------------
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
::BOH
$Id: __init__.py,v 1.5 2010/09/13 16:09:30 alfille Exp $
$HeadURL: http://subversion/stuff/svn/owfs/trunk/ow/__init__.py $
Copyright (c) 2006 Peter Kropf. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
::EOH
1-wire sensor network interface. ownet provides standalone access to an
owserver without the need to compile the core ow libraries on the
local system. As a result, ownet can run on almost any platform that
supports Python.
OWFS is an open source project developed by Paul Alfille and hosted at
http://www.owfs.org
"""
# avoid error with python2.2
from __future__ import generators
import sys
import os
from connection import Connection
__author__ = 'Peter Kropf'
__email__ = '[email protected]'
__version__ = '0.3' + '-' + '$Id: __init__.py,v 1.5 2010/09/13 16:09:30 alfille Exp $'.split()[2]
#
# exceptions used and thrown by the ownet classes
#
class exError(Exception):
"""base exception for all one wire raised exceptions."""
class exErrorValue(exError):
"""Base exception for all one wire raised exceptions with a value."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class exNoController(exError):
"""Exception raised when a controller cannot be initialized."""
class exNotInitialized(exError):
"""Exception raised when a controller has not been initialized."""
class exUnknownSensor(exErrorValue):
"""Exception raised when a specified sensor is not found."""
#
# _server and _port are the default server and port values to be used
# if a Sensor is initialized without specifying a server and port.
#
_server = None
_port = None
#
# Initialize and cleanup the _server and _port default values.
#
def init(iface):
"""
Initialize the interface mechanism to be used for communications
to the 1-wire network. Only socket connections to owserver are
supported.
Examples:
ownet.init('remote_system:3003')
Will initialize the 1-wire interface to use the owserver running
on remote_system on port 3003.
"""
#print 'ownet.init(%s)' % iface
global _server
global _port
pair = iface.split(':')
if len(pair) != 2:
raise exNoController
_server = pair[0]
_port = pair[1]
def finish():
"""
Cleanup the OWFS library, freeing any used resources.
"""
#print 'ownet.finish()'
global _server
global _port
_server = None
_port = None
#
# 1-wire sensors
#
class Sensor(object):
"""
A Sensor is the basic component of a 1-wire network. It represents
    an individual 1-wire element as it exists on the network.
"""
def __init__(self, path, server = None, port = None, connection=None):
"""
Create a new Sensor as it exists at the specified path.
"""
#print 'Sensor.__init__(%s, server="%s", port=%s)' % (path, str(server), str(port))
        # set up the connection to use for communication with the owsensor server
if connection:
self._connection = connection
elif not server or not port:
global _server
global _port
if not _server or not _port:
raise exNotInitialized
else:
self._connection = Connection(_server, int(_port))
else:
self._connection = Connection(server, port)
self._attrs = {}
if path == '/':
self._path = path
self._useCache = True
elif path == '/uncached':
self._path = '/'
self._useCache = False
else:
if path[:len('/uncached')] == '/uncached':
self._path = path[len('/uncached'):]
self._useCache = False
else:
self._path = path
self._useCache = True
self.useCache(self._useCache)
def __str__(self):
"""
Print a string representation of the Sensor in the form of:
server:port/path - type
Example:
>>> print Sensor('/')
xyzzy:9876/ - DS9490
"""
#print 'Sensor.__str__'
return "%s%s - %s" % (str(self._connection), self._usePath, self._type)
def __repr__(self):
"""
Print a representation of the Sensor in the form of:
Sensor(path)
Example:
>>> Sensor('/')
Sensor("/", server="xyzzy", port=9876)
"""
#print 'Sensor.__repr__'
return 'Sensor("%s", server="%s", port=%i)' % (self._usePath, self._connection._server, self._connection._port)
def __eq__(self, other):
"""
Two sensors are considered equal if their paths are
equal. This is done by comparing their _path attributes so
that cached and uncached Sensors compare equal.
Examples:
>>> Sensor('/') == Sensor('/1F.440701000000')
False
>>> Sensor('/') == Sensor('/uncached')
True
"""
#print 'Sensor.__eq__(%s)' % str(other)
return self._path == other._path
def __hash__(self):
"""
Return a hash for the Sensor object's name. This allows
Sensors to be used better in sets.Set.
"""
#print 'Sensor.__hash__'
return hash(self._path)
def __getattr__(self, name):
"""
        Retrieve an attribute from the sensor. __getattr__ is called
only if the named item doesn't exist in the Sensor's
namespace. If it's not in the namespace, look for the attribute
on the physical sensor.
Usage:
s = ownet.Sensor('/1F.5D0B01000000')
print s.family, s.PIO_0
will result in the family and PIO.0 values being read from the
sensor and printed. In this example, the family would be 1F
        and the PIO.0 might be 1.
"""
try:
#print 'Sensor.__getattr__(%s)' % name
attr = self._connection.read(object.__getattribute__(self, '_attrs')[name])
except KeyError:
raise AttributeError, name
return attr
def __setattr__(self, name, value):
"""
Set the value of a sensor attribute. This is accomplished by
first determining if the physical sensor has the named
attribute. If it does, then the value is written to the
name. Otherwise, the Sensor's dictionary is updated with the
name and value.
Usage:
s = ownet.Sensor('/1F.5D0B01000000')
s.PIO_1 = '1'
will set the value of PIO.1 to 1.
"""
#print 'Sensor.__setattr__(%s, %s)' % (name, value)
# Life can get tricky when using __setattr__. Self doesn't
        # have an _attrs attribute when it's initially created. _attrs
# is only there after it's been set in __init__. So we can
# only reference it if it's already been added.
if hasattr(self, '_attrs'):
if name in self._attrs:
self._connection.write(self._attrs[name], value)
else:
self.__dict__[name] = value
else:
self.__dict__[name] = value
def useCache(self, use_cache):
"""
Set the sensor to use the underlying owfs cache (or not)
depending on the use_cache parameter.
Usage:
s = ownet.Sensor('/1F.5D0B01000000')
s.useCache(False)
will set the internal sensor path to /uncached/1F.5D0B01000000.
Also:
s = ownet.Sensor('/uncached/1F.5D0B01000000')
s.useCache(True)
will set the internal sensor path to /1F.5D0B01000000.
"""
#print 'Sensor.useCache(%s)' % str(use_cache)
self._useCache = use_cache
if self._useCache:
self._usePath = self._path
else:
if self._path == '/':
self._usePath = '/uncached'
else:
self._usePath = '/uncached' + self._path
if self._path == '/':
self._type = self._connection.read('/system/adapter/name.0')
else:
self._type = self._connection.read('%s/type' % self._usePath)
self._attrs = dict([(n.replace('.', '_'), self._usePath + '/' + n) for n in self.entries()])
def entries(self):
"""
Generator which yields the attributes of a sensor.
"""
#print 'Sensor.entries()'
list = self._connection.dir(self._usePath)
if self._path == '/':
for entry in list:
if not '/' in entry:
yield entry
else:
for entry in list:
yield entry.split('/')[-1]
def entryList(self):
"""
List of the sensor's attributes.
Example:
>>> Sensor("/10.B7B64D000800").entryList()
['address', 'crc8', 'die', 'family', 'id', 'power',
'present', 'temperature', 'temphigh', 'templow',
'trim', 'trimblanket', 'trimvalid', 'type']
"""
#print 'Sensor.entryList()'
return [e for e in self.entries()]
def sensors(self, names = ['main', 'aux']):
"""
Generator which yields all the sensors that are associated
with the current sensor.
In the event that the current sensor is the adapter (such as a
DS9490 USB adapter) the list of sensors directly attached to
the 1-wire network will be yielded.
In the event that the current sensor is a microlan controller
(such as a DS2409) the list of directories found in the names
list parameter will be searched and any sensors found will be
yielded. The names parameter defaults to ['main', 'aux'].
"""
#print 'Sensor.sensors(%s)' % str(names)
if self._type == 'DS2409':
for branch in names:
path = self._usePath + '/' + branch
list = filter(lambda x: '/' in x, self._connection.dir(path))
if list:
namelist = ','.join(list)
#print 'Sensor.sensors namelist(%s)' % str(namelist)
for branch_entry in namelist.split(','):
# print 'branch_entry(%s)' % str(branch_entry)
try:
self._connection.read(branch_entry + '/type')
except exUnknownSensor, ex:
continue
yield Sensor(branch_entry, connection=self._connection)
else:
list = self._connection.dir(self._usePath)
if self._path == '/':
for entry in list:
if '/' in entry:
yield Sensor(entry, connection=self._connection)
def sensorList(self, names = ['main', 'aux']):
"""
List of all the sensors that are associated with the current
sensor.
In the event that the current sensor is the adapter (such as a
DS9490 USB adapter) the list of sensors directly attached to
the 1-wire network will be yielded.
In the event that the current sensor is a microlan controller
(such as a DS2409) the list of directories found in the names
list parameter will be searched and any sensors found will be
yielded. The names parameter defaults to ['main', 'aux'].
Example:
>>> Sensor("/1F.440701000000").sensorList()
[Sensor("/1F.440701000000/main/29.400900000000")]
"""
#print 'Sensor.sensorList(%s)' % str(names)
return [s for s in self.sensors()]
def find(self, **keywords):
"""
Generator which yields all the sensors whose attributes match
        those passed in. By default, any matched attribute will yield a
sensor. If the parameter all is passed to the find call, then
all the supplied attributes must match to yield a sensor.
Usage:
for s in Sensor('/').find(type = 'DS2408'):
print s
will print all the sensors whose type is DS2408.
root = Sensor('/')
print len([s for s in root.find(all = True,
family = '1F',
type = 'DS2409')])
will print the count of sensors whose family is 1F and whose
type is DS2409.
"""
#print 'Sensor.find', keywords
#recursion = keywords.pop('recursion', False)
all = keywords.pop('all', False)
for sensor in self.sensors():
match = 0
for attribute in keywords:
if hasattr(sensor, attribute):
if keywords[attribute]:
if getattr(sensor, attribute) == keywords[attribute]:
match = match + 1
else:
if hasattr(sensor, attribute):
match = match + 1
if not all and match:
yield sensor
elif all and match == len(keywords):
yield sensor
| myna-project/modbus | ownet/__init__.py | Python | gpl-3.0 | 14,612 |
import module1
import module2 | github/codeql | python/ql/test/query-tests/Metrics/imports/entry.py | Python | mit | 29 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016, MuChu Hsu
Contributed by Muchu Hsu ([email protected])
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from findfine_crawler.crawlerForExRate import CrawlerForExRate
"""
Tests crawling exchange-rate data from the Yahoo foreign-currency investment page
"""
class CrawlerForExRateTest(unittest.TestCase):
    # setup
def setUp(self):
logging.basicConfig(level=logging.INFO)
self.crawler = CrawlerForExRate()
self.crawler.initDriver()
    # teardown
def tearDown(self):
self.crawler.quitDriver()
    # test crawling the yahoo currency page
def test_crawlYahooCurrencyPage(self):
logging.info("CrawlerForExRateTest.test_crawlYahooCurrencyPage")
self.crawler.crawlYahooCurrencyPage()
# start the tests
if __name__ == "__main__":
unittest.main(exit=False)
| muchu1983/104_findfine | test/unit/test_crawlerForExRate.py | Python | bsd-3-clause | 911 |
from .serializers import ProgramSerializer, ProgramProductSerializer
from partners.models import Program, ProgramProduct
from core.api.views import BaseModelViewSet
class ProgramViewSet(BaseModelViewSet):
queryset = Program.objects.all()
serializer_class = ProgramSerializer
class ProgramProductViewSet(BaseModelViewSet):
queryset = ProgramProduct.objects.all()
serializer_class = ProgramProductSerializer
| eHealthAfrica/LMIS | LMIS/partners/api/views.py | Python | gpl-2.0 | 426 |
dict = { 1: 'a', 2: 'b', 3: 'c' } | romankagan/DDBWorkbench | python/testData/formatter/spaceWithinBraces_after.py | Python | apache-2.0 | 33 |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from gppylib.commands.base import Command
import tinctest
from tinctest.lib import Gpdiff, local_path
from mpp.gpdb.tests.storage.fts.fts_transitions.fts_transitions import FTSTestCase
from mpp.lib.config import GPDBConfig
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.PSQL import PSQL
from mpp.models import MPPTestCase
class ResyncBug(MPPTestCase):
def __init__(self, methodName):
super(ResyncBug, self).__init__(methodName)
def run_sql(self, filename):
sql_file = local_path('sql/%s.sql' % filename)
ans_file = local_path('expected/%s.ans' % filename)
out_file = local_path('output/%s.out' % filename)
assert PSQL.run_sql_file(sql_file, out_file), sql_file
assert Gpdiff.are_files_equal(out_file, ans_file), out_file
def test_resync_ct_blocks_per_query(self):
'''Catch a bug in resync that manifests only after rebalance.
The logic used by a resync worker to obtain changed blocks
from CT log had a bug. The SQL query used to obtain a batch
of changed blocks from CT log was incorrectly using LSN to
filter out changed blocks. All of the following must be true
for the bug to occur:
* More than gp_filerep_ct_batch_size blocks of a relation
are changed on a segment in changetracking.
* A block with a higher number is changed earlier (lower
LSN) than lower numbered blocks.
* The first batch of changed blocks obtained by resync worker
from CT log for this relation contains only lower
(according to block number) blocks. The higher block with
lower LSN is not included in this batch. Another query
must be run against CT log to obtain this block.
* The SQL query used to obtain next batch of changed blocks
for this relation contains incorrect WHERE clause involving
a filter based on LSN of previously obtained blocks. The
higher numbered block is missed out - not returned by the
query as changed block for the relation. The block is
never shipped from primary to mirror, resulting in data
loss. The test aims to verify that this doesn't happen as
the bug is now fixed.
'''
config = GPDBConfig()
assert (config.is_not_insync_segments() &
config.is_balanced_segments()), 'cluster not in-sync and balanced'
# Create table and insert data so that adequate number of
# blocks are occupied.
self.run_sql('resync_bug_setup')
# Bring down primaries and transition mirrors to
# changetracking.
filerep = Filerepe2e_Util()
filerep.inject_fault(y='fault', f='segment_probe_response',
r='primary')
# Trigger the fault by running a sql file.
PSQL.run_sql_file(local_path('test_ddl.sql'))
filerep.wait_till_change_tracking_transition()
# Set gp_filerep_ct_batch_size = 3.
cmd = Command('reduce resync batch size',
'gpconfig -c gp_filerep_ct_batch_size -v 3')
cmd.run()
assert cmd.get_results().rc == 0, 'gpconfig failed'
cmd = Command('load updated config', 'gpstop -au')
cmd.run()
assert cmd.get_results().rc == 0, '"gpstop -au" failed'
self.run_sql('change_blocks_in_ct')
# Capture change tracking log contents from the segment of
# interest for debugging, in case the test fails.
(host, port) = GPDBConfig().get_hostandport_of_segment(0, 'p')
assert PSQL.run_sql_file_utility_mode(
sql_file=local_path('sql/ct_log_contents.sql'),
out_file=local_path('output/ct_log_contents.out'),
            host=host, port=port), 'sql/ct_log_contents.sql'
gprecover = GpRecover(GPDBConfig())
gprecover.incremental(False)
gprecover.wait_till_insync_transition()
# Rebalance, so that original primary is back in the role
gprecover = GpRecover(GPDBConfig())
gprecover.rebalance()
gprecover.wait_till_insync_transition()
# Reset gp_filerep_ct_batch_size
cmd = Command('reset resync batch size',
'gpconfig -r gp_filerep_ct_batch_size')
cmd.run()
assert cmd.get_results().rc == 0, 'gpconfig failed'
cmd = Command('load updated config', 'gpstop -au')
cmd.run()
assert cmd.get_results().rc == 0, '"gpstop -au" failed'
self.run_sql('select_after_rebalance')
class FtsTransitionsPart03(FTSTestCase):
''' State of FTS at different fault points
'''
def __init__(self, methodName):
super(FtsTransitionsPart03,self).__init__(methodName)
def test_primary_resync_postmaster_reset_with_faults(self):
'''
@data_provider pr_faults
'''
filerep_fault = self.test_data[1][0]
fault_type = self.test_data[1][1]
filerep_role = self.test_data[1][2]
gpstate_role = self.test_data[1][3]
gprecover = self.test_data[1][4]
tinctest.logger.info("\n ===============================================")
tinctest.logger.info("\n Starting New Test: %s " % self.test_data[0][1])
tinctest.logger.info("\n ===============================================")
self.primary_resync_postmaster_reset_with_faults(filerep_fault, fault_type, filerep_role, gpstate_role, gprecover)
def test_mirror_resync_postmaster_reset_with_faults(self):
tinctest.logger.info("\n ===============================================")
tinctest.logger.info("\n Starting New Test: test_mirror_resync_postmaster_reset_with_faults ")
tinctest.logger.info("\n ===============================================")
self.mirror_resync_postmaster_reset_with_faults()
@tinctest.dataProvider('pr_faults')
def test_pr_faults():
data = {'test_27_primary_resync_postmaster_reset_checkpoint': ['checkpoint','panic', 'primary', 'mirror', 'no'],
'test_28_primary_resync_postmaster_reset_filerep_flush': ['filerep_flush','panic','primary', 'mirror', 'no'],
'test_29_primary_resync_postmaster_reset_filerep_consumer': ['filerep_consumer','panic', 'primary', 'mirror', 'no'],
'test_30_mirror_resync_process_missing_failover': ['filerep_sender','error', 'mirror', 'mirror', 'yes'],
'test_31_primary_resync_process_missing_failover': ['filerep_sender','error', 'primary', 'primary', 'yes'],
'test_32_mirror_resync_deadlock_failover': ['filerep_sender', 'infinite_loop', 'mirror', 'mirror', 'yes'],
'test_33_primary_resync_deadlock_failover': ['filerep_sender', 'infinite_loop', 'primary', 'primary', 'yes'],
'test_34_primary_resync_filerep_network_failover': ['filerep_consumer', 'panic', 'mirror', 'mirror', 'yes'],
'test_36_primary_resync_postmaster_missing_failover': ['postmaster', 'panic', 'primary', 'mirror', 'no'],
'test_37_primary_resync_system_failover': ['filerep_resync_worker_read', 'fault', 'primary', 'mirror', 'yes'],
'test_38_primary_resync_mirror_cannot_keepup_failover': ['filerep_receiver', 'sleep', 'primary', 'mirror', 'no'],
'test_39_mirror_resync_filerep_network': ['filerep_receiver', 'fault', 'mirror', 'primary', 'yes'],
'test_40_mirror_resync_system_failover': ['filerep_flush', 'fault', 'mirror', 'primary', 'yes'],
'test_41_mirror_resync_postmaster_missing_failover': ['postmaster', 'panic', 'mirror', 'primary', 'yes'],
'test_46_primary_resync_postmaster_reset_filerep_sender': ['filerep_sender', 'panic', 'primary', 'primary', 'yes']}
return data
| Quikling/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/fts/fts_transitions/test_fts_transitions_03.py | Python | apache-2.0 | 8,502 |
#https://pypi.python.org/pypi/enum34
#from enum import Enum
#class ResponseCode(Enum):
# OK = 1
# WARNING = 2
# ERROR = 3
class ResponseMessage:
def __init__(self, errorCode, errorMessage):
self.errorCode = errorCode
self.errorMessage = errorMessage
def getErrorCode(self):
return self.errorCode | andriyboychenko/books-online | catalogue/entities/ResponseMessage.py | Python | apache-2.0 | 346 |
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Mts20140618AddMediaRequest(RestApi):
def __init__(self,domain='mts.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.Description = None
self.InputFileUrl = None
self.Tags = None
self.Title = None
def getapiname(self):
return 'mts.aliyuncs.com.AddMedia.2014-06-18'
| wanghe4096/website | aliyun/api/rest/Mts20140618AddMediaRequest.py | Python | bsd-2-clause | 392 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Runs the server for backend application `prepojenia`."""
import argparse
import json
import os
import sys
from paste import httpserver
import webapp2
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/db')))
from db import DatabaseConnection
from relations import Relations
class MyServer(webapp2.RequestHandler):
"""Abstract request handler, to be subclasses by server hooks."""
def get(self):
"""Implements actual hook logic and responds to requests."""
raise NotImplementedError('Must implement method `get`.')
def _parse_int(self, parameter, default=None, max_value=None):
"""Attempts to parse an integer GET `parameter`."""
if default and (parameter not in self.request.GET):
return default
try:
value = int(self.request.GET[parameter])
if max_value and (value > max_value):
self.abort(400, detail='Parameter "%s" must be <= %d' % (
parameter, max_value))
return value
except:
self.abort(400, detail='Parameter "%s" must be integer' % (
parameter))
return 0
def _parse_eid_list(self, parameter, limit=50):
"""Parses eids from comma-separated GET string `parameter`."""
try:
eids = [
int(x)
for x in (self.request.GET[parameter].split(','))[:limit]
]
return eids
except:
self.abort(400, detail='Could not parse %s' % parameter)
return []
def parse_start_end(self):
"""Parses parameters from comma-separated integer list format."""
try:
start = [int(x) for x in (self.request.GET["eid1"].split(","))[:50]]
end = [int(x) for x in (self.request.GET["eid2"].split(","))[:50]]
return start, end
except:
self.abort(400, detail='Could not parse start and/or end eIDs')
return 0, 0
def _add_entity_info_to_vertices(self, vertices):
"""Endows vertices with information about their entities."""
# No work to be done if the subgraph is empty. This is necessary
# as PostgreSQL does not handle an empty WHERE IN clause.
if len(vertices) == 0:
return
db = webapp2.get_app().registry['db']
vertices_eids = [v['eid'] for v in vertices]
q = """
SELECT
entities.id AS eid,
entities.name,
entity_flags.trade_with_government AS trade_with_government,
entity_flags.political_entity AS political_entity,
entity_flags.contact_with_politics AS contact_with_politics
FROM entities
LEFT JOIN entity_flags ON entity_flags.eid=entities.id
WHERE entities.id IN %s;
"""
q_data = [tuple(vertices_eids)]
eid_to_entity = {row['eid']: row for row in db.query(q, q_data)}
for vertex in vertices:
entity = eid_to_entity[vertex['eid']]
vertex['entity_name'] = entity['name']
for key in ['trade_with_government', 'political_entity',
'contact_with_politics']:
vertex[key] = entity[key]
def returnJSON(self, j):
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(j, separators=(',', ':')))
class AShortestPath(MyServer):
def get(self):
start, end = self.parse_start_end()
relations = webapp2.get_app().registry['relations']
response = relations.bfs(start, end) # Assumes unit edge lengths!
self.returnJSON(response)
class Subgraph(MyServer):
def get(self):
relations = webapp2.get_app().registry['relations']
# Compute the subgraph to return:
start = self._parse_eid_list('eid1')
end = self._parse_eid_list('eid2')
subgraph = relations.subgraph(start, end)
# Endow returning vertices with entity names (and other info).
self._add_entity_info_to_vertices(subgraph['vertices'])
self.returnJSON(subgraph)
class NotableConnections(MyServer):
"""Returns subgraphs of connections to "notable" entities."""
def get(self):
relations = webapp2.get_app().registry['relations']
notable_eids = webapp2.get_app().registry['notable_eids']
# Parse URL parameters:
start = self._parse_eid_list('eid')
radius = self._parse_int('radius', default=6)
max_nodes_to_explore = self._parse_int(
'max_explore', default=100000, max_value=1000000)
target_order = self._parse_int(
'target_order', default=50, max_value=200)
max_order = self._parse_int(
'max_order', default=100, max_value=200)
# Special case due to current media coverage.
# first_eid_name = webapp2.get_app().registry['db'].query(
# 'SELECT name FROM entities WHERE id=%s;',
# [start[0]]
# )[0]['name']
# if 'Kočner' in first_eid_name:
# target_order = 120
# max_order = 120
# Build subgraph of connections to notable entities:
subgraph = relations.get_notable_connections_subgraph(
start, notable_eids, radius, max_nodes_to_explore, target_order,
max_order)
# Endow returning vertices with entity names (and other info).
self._add_entity_info_to_vertices(subgraph['vertices'])
self.returnJSON(subgraph)
app = webapp2.WSGIApplication([
('/a_shortest_path', AShortestPath),
('/subgraph', Subgraph),
('/notable_connections', NotableConnections),
], debug=False)
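# Startup helpers below: each relation row is added to the edge list twice, once
# per direction, with the sign of the stakeholder type id encoding the direction
# (positive for eid -> eid_relation, negative for the reverse).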
def _initialise_relations(db, max_relations_to_load):
"""Returns Relations object build from edges in database `db`."""
# Retrieve list of relationship edges:
q = """
SELECT eid, eid_relation, stakeholder_type_id
FROM related WHERE eid <> eid_relation
LIMIT %s;
"""
q_data = [max_relations_to_load]
edge_list = []
for row in db.query(q, q_data):
edge_type = row['stakeholder_type_id'] or 0
edge_list.append(
(row['eid'], row['eid_relation'], +1 * edge_type))
edge_list.append(
(row['eid_relation'], row['eid'], -1 * edge_type))
print('[OK] Received %d edges.' % len(edge_list))
# Construct and return Relations object from the edge list:
return Relations(edge_list)
def _initialise_notable_eids(db):
"""Returns set of eids corresponding to "notable" entities."""
rows = db.query("""
SELECT eid FROM entity_flags
WHERE political_entity=TRUE;
""")
notable_eids = set(row["eid"] for row in rows)
print('[OK] Received %d notable eIDs.' % len(notable_eids))
return notable_eids
def initialise_app(max_relations_to_load):
"""Precomputes values shared across requests to this app.
The registry property is intended for storing these precomputed
values, so as to avoid global variables.
"""
# Connect to the database:
db = DatabaseConnection(path_config=os.path.abspath(os.path.join(os.path.dirname(__file__), 'db_config.yaml')))
schema = db.get_latest_schema('prod_')
db.execute('SET search_path to ' + schema + ';')
app.registry['db'] = db
# Build Relations object and a set of notable eIDs:
app.registry['relations'] = _initialise_relations(db, max_relations_to_load)
app.registry['notable_eids'] = _initialise_notable_eids(db)
def main(args):
# Initialise the app by precomputing values:
initialise_app(args.max_relations_to_load)
# Start the server:
host, port = args.listen.split(':')
httpserver.serve(
app,
host=host,
port=port,
request_queue_size=128,
use_threadpool=True,
threadpool_workers=32,
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--listen',
help='host:port to listen on',
default='127.0.0.1:8081')
parser.add_argument('--max_relations_to_load',
type=int,
help='Maximum # of edges to load from database.',
default=123456789)
args = parser.parse_args()
main(args)
| verejnedigital/verejne.digital | prepojenia/server.py | Python | apache-2.0 | 8,389 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields
from netforce import access
class Role(Model):
_name = "role"
_string = "Role"
_key = ["name"]
_fields = {
"name": fields.Char("Name", required=True, search=True),
"parent_id": fields.Many2One("role", "Parent"),
"description": fields.Text("Description"),
"comments": fields.One2Many("message", "related_id", "Comments"),
}
_order = "name"
Role.register()
| sidzan/netforce | netforce_general/netforce_general/models/role.py | Python | mit | 1,560 |
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
version = '0.9.0'
| CMUSV-VisTrails/WorkflowRecommendation | vistrails/db/versions/v0_9_0/__init__.py | Python | bsd-3-clause | 1,861 |
from datetime import datetime
from django.db import models
from django.utils import timezone
from applications.validators import validate_phone_number
YEAR_CHOICES = (
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
)
class ApplicationPeriod(models.Model):
name = models.CharField(max_length=50, verbose_name="Navn")
period_start = models.DateTimeField(default=timezone.now, blank=False)
period_end = models.DateTimeField(default=timezone.now, blank=False)
def __str__(self):
return self.name
def status(self):
        if self.period_end < timezone.now():
            return "late"
        elif timezone.now() < self.period_start:
            return "early"
return "open"
def is_open(self):
return self.status() == "open"
class ApplicationGroup(models.Model):
name = models.CharField(max_length=50, verbose_name="Gruppenavn")
text_main = models.TextField(verbose_name="Om gruppen generelt", blank=False)
text_structure = models.TextField(verbose_name="Om gruppens struktur", blank=True)
text_workload = models.TextField(
verbose_name="Om gruppens arbeidsmengde", blank=True
)
project_group = models.BooleanField(
verbose_name="Gruppen tilhører prosjektgruppen", default=False
)
def __str__(self):
return self.name
class Application(models.Model):
name = models.CharField(max_length=50, verbose_name="Navn")
email = models.EmailField(verbose_name="Email")
phone = models.CharField(
max_length=8, validators=[validate_phone_number], verbose_name="Telefon"
)
study = models.CharField(max_length=255, verbose_name="Studieprogram")
year = models.IntegerField(
blank=False,
verbose_name="Årstrinn",
choices=YEAR_CHOICES,
default=YEAR_CHOICES[0],
)
group_choice = models.ManyToManyField(
ApplicationGroup, through="ApplicationGroupChoice"
)
knowledge_of_hs = models.CharField(
max_length=1000, verbose_name="Hvordan fikk du vite om Hackerspace?"
)
about = models.TextField(verbose_name="Litt om deg selv")
application_text = models.TextField(verbose_name="Hvorfor søker du hackerspace?")
application_date = models.DateTimeField(default=timezone.now, blank=False)
def __str__(self):
return self.name
class ApplicationGroupChoice(models.Model):
"""Intermediate model to add priority attribute to application group choices"""
application = models.ForeignKey(Application, on_delete=models.CASCADE)
group = models.ForeignKey(ApplicationGroup, on_delete=models.CASCADE)
priority = models.PositiveIntegerField(null=True)
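# Minimal usage sketch (illustrative only; the lookups below assume existing rows
# and are not part of this module):
#   app = Application.objects.get(email="applicant@example.com")
#   group = ApplicationGroup.objects.get(name="Drift")
#   ApplicationGroupChoice.objects.create(application=app, group=group, priority=1)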
| hackerspace-ntnu/website | applications/models.py | Python | mit | 2,686 |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
"API_ENDPOINTS_1_0",
"API_ENDPOINTS_2_0",
"API_VERSIONS",
"INSTANCE_TYPES",
"MAX_VIRTIO_CONTROLLERS",
"MAX_VIRTIO_UNITS",
]
# API end-points
API_ENDPOINTS_1_0 = {
"zrh": {
"name": "Zurich",
"country": "Switzerland",
"host": "api.zrh.cloudsigma.com",
},
"lvs": {
"name": "Las Vegas",
"country": "United States",
"host": "api.lvs.cloudsigma.com",
},
}
API_ENDPOINTS_2_0 = {
"zrh": {
"name": "Zurich, Switzerland",
"country": "Switzerland",
"host": "zrh.cloudsigma.com",
},
"sjc": {
"name": "San Jose, CA",
"country": "United States",
"host": "sjc.cloudsigma.com",
},
"wdc": {
"name": "Washington, DC",
"country": "United States",
"host": "wdc.cloudsigma.com",
},
"hnl": {
"name": "Honolulu, HI",
"country": "United States",
"host": "hnl.cloudsigma.com",
},
"per": {
"name": "Perth, Australia",
"country": "Australia",
"host": "per.cloudsigma.com",
},
"mnl": {
"name": "Manila, Philippines",
"country": "Philippines",
"host": "mnl.cloudsigma.com",
},
"fra": {
"name": "Frankfurt, Germany",
"country": "Germany",
"host": "fra.cloudsigma.com",
},
"mel": {
"name": "Melbourne, Australia",
"country": "Australia",
"host": "mel.cloudsigma.com",
},
"dbl": {
"name": "Dublin, Ireland",
"country": "Ireland",
"host": "ec.servecentric.com",
},
"tyo": {"name": "Tokyo, Japan", "country": "Japan", "host": "tyo.cloudsigma.com"},
"crk": {
"name": "Clark, Philippines",
"country": "Philippines",
"host": "crk.cloudsigma.com",
},
"mnl2": {
"name": "Manila-2, Philippines",
"country": "Philippines",
"host": "mnl2.cloudsigma.com",
},
"ruh": {
"name": "Riyadh, Saudi Arabia",
"country": "Saudi Arabia",
"host": "ruh.cloudsigma.com",
},
"bdn": {"name": "Boden, Sweden", "country": "Sweden", "host": "cloud.hydro66.com"},
"gva": {
"name": "Geneva, Switzerland",
"country": "Switzerland",
"host": "gva.cloudsigma.com",
},
}
DEFAULT_REGION = "zrh"
# Supported API versions.
API_VERSIONS = ["1.0" "2.0"] # old and deprecated
DEFAULT_API_VERSION = "2.0"
# CloudSigma doesn't specify special instance types.
# Basically for CPU any value between 0.5 GHz and 20.0 GHz should work,
# 500 MB to 32000 MB for ram
# and 1 GB to 1024 GB for hard drive size.
# Plans in this file are based on examples listed on
# https://cloudsigma.com/pricing/
INSTANCE_TYPES = [
{
"id": "small-1",
"name": "small-1, 1 CPUs, 512MB RAM, 50GB disk",
"cpu": 1,
"memory": 512,
"disk": 50,
"bandwidth": None,
},
{
"id": "small-2",
"name": "small-2, 1 CPUs, 1024MB RAM, 50GB disk",
"cpu": 1,
"memory": 1024,
"disk": 50,
"bandwidth": None,
},
{
"id": "small-3",
"name": "small-3, 1 CPUs, 2048MB RAM, 50GB disk",
"cpu": 1,
"memory": 2048,
"disk": 50,
"bandwidth": None,
},
{
"id": "medium-1",
"name": "medium-1, 2 CPUs, 2048MB RAM, 50GB disk",
"cpu": 2,
"memory": 2048,
"disk": 50,
"bandwidth": None,
},
{
"id": "medium-2",
"name": "medium-2, 2 CPUs, 4096MB RAM, 60GB disk",
"cpu": 2,
"memory": 4096,
"disk": 60,
"bandwidth": None,
},
{
"id": "medium-3",
"name": "medium-3, 4 CPUs, 8192MB RAM, 80GB disk",
"cpu": 4,
"memory": 8192,
"disk": 80,
"bandwidth": None,
},
{
"id": "large-1",
"name": "large-1, 8 CPUs, 16384MB RAM, 160GB disk",
"cpu": 8,
"memory": 16384,
"disk": 160,
"bandwidth": None,
},
{
"id": "large-2",
"name": "large-2, 12 CPUs, 32768MB RAM, 320GB disk",
"cpu": 12,
"memory": 32768,
"disk": 320,
"bandwidth": None,
},
{
"id": "large-3",
"name": "large-3, 16 CPUs, 49152MB RAM, 480GB disk",
"cpu": 16,
"memory": 49152,
"disk": 480,
"bandwidth": None,
},
{
"id": "xlarge",
"name": "xlarge, 20 CPUs, 65536MB RAM, 640GB disk",
"cpu": 20,
"memory": 65536,
"disk": 640,
"bandwidth": None,
},
]
# mapping between cpus, ram, disk to example size attributes
SPECS_TO_SIZE = {
(1, 512, 50): {
"id": "small-1",
"name": "small-1, 1 CPUs, 512MB RAM, 50GB disk",
"cpu": 1,
"ram": 512,
"disk": 50,
"bandwidth": None,
"price": None,
},
(1, 1024, 50): {
"id": "small-2",
"name": "small-2, 1 CPUs, 1024MB RAM, 50GB disk",
"cpu": 1,
"ram": 1024,
"disk": 50,
"bandwidth": None,
"price": None,
},
(1, 2048, 50): {
"id": "small-3",
"name": "small-3, 1 CPUs, 2048MB RAM, 50GB disk",
"cpu": 1,
"ram": 2048,
"disk": 50,
"bandwidth": None,
"price": None,
},
(2, 2048, 50): {
"id": "medium-1",
"name": "medium-1, 2 CPUs, 2048MB RAM, 50GB disk",
"cpu": 2,
"ram": 2048,
"disk": 50,
"bandwidth": None,
"price": None,
},
(2, 4096, 60): {
"id": "medium-2",
"name": "medium-2, 2 CPUs, 4096MB RAM, 60GB disk",
"cpu": 2,
"ram": 4096,
"disk": 60,
"bandwidth": None,
"price": None,
},
(4, 8192, 80): {
"id": "medium-3",
"name": "medium-3, 4 CPUs, 8192MB RAM, 80GB disk",
"cpu": 4,
"ram": 8192,
"disk": 80,
"bandwidth": None,
"price": None,
},
(8, 16384, 160): {
"id": "large-1",
"name": "large-1, 8 CPUs, 16384MB RAM, 160GB disk",
"cpu": 8,
"ram": 16384,
"disk": 160,
"bandwidth": None,
"price": None,
},
(12, 32768, 320): {
"id": "large-2",
"name": "large-2, 12 CPUs, 32768MB RAM, 320GB disk",
"cpu": 12,
"ram": 32768,
"disk": 320,
"bandwidth": None,
"price": None,
},
(16, 49152, 480): {
"id": "large-3",
"name": "large-3, 16 CPUs, 49152MB RAM, 480GB disk",
"cpu": 16,
"ram": 49152,
"disk": 480,
"bandwidth": None,
"price": None,
},
(20, 65536, 640): {
"id": "xlarge",
"name": "xlarge, 20 CPUs, 65536MB RAM, 640GB disk",
"cpu": 20,
"ram": 65536,
"disk": 640,
"bandwidth": None,
"price": None,
},
}
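# Illustrative lookup (not part of the original constants): raw specs can be mapped
# back to one of the example sizes above, e.g.
#   SPECS_TO_SIZE[(2, 4096, 60)]['id']  # -> 'medium-2'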
MAX_VIRTIO_CONTROLLERS = 203
MAX_VIRTIO_UNITS = 4
| apache/libcloud | libcloud/common/cloudsigma.py | Python | apache-2.0 | 7,813 |
#!/usr/bin/env python3
# author: [email protected]
import visa # https://github.com/hgrecco/pyvisa
import numpy
import sys
import time
import k2450 # functions to talk to a keithley 2450 sourcemeter
import rs # grey's sheet resistance library
# for plotting
import matplotlib.pyplot as plt
plt.switch_backend("Qt5Agg")
# debugging/testing stuff
#visa.log_to_screen() # for debugging
#import timeit
# for the GUI
import pyqtGen
from PyQt5 import QtCore, QtGui, QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
#========= start print override stuff =========
# this stuff is for overriding print() so that
# any calls to it are mirrored to my gui's log pane
import builtins as __builtin__
systemPrint = __builtin__.print
myPrinter = None
import io
class MyPrinter(QtCore.QObject): # a class that holds the signals we'll need for passing around the log data
writeToLog = QtCore.pyqtSignal(str) # this signal sends the contents for the log
scrollLog = QtCore.pyqtSignal() # this signal tells the log to scroll to its max position
def print(*args, **kwargs): # overload the print() function
global myPrinter
if myPrinter is not None: # check to see if the gui has created myPrinter
stringBuf = io.StringIO()
kwargs['file'] = stringBuf
systemPrint(*args, **kwargs) # print to our string buffer
myPrinter.writeToLog.emit(stringBuf.getvalue()) # send the print to the gui
myPrinter.scrollLog.emit() # tell the gui to scroll the log
stringBuf.close()
kwargs['file'] = sys.stdout
return systemPrint(*args, **kwargs) # now do the print for real
__builtin__.print = print
#========= end print override stuff =========
# this is the thread where the sweep takes place
class sweepThread(QtCore.QThread):
def __init__(self, mainWindow, parent=None):
QtCore.QThread.__init__(self, parent)
self.mainWindow = mainWindow
def run(self):
self.mainWindow.ui.applyButton.setEnabled(False)
self.mainWindow.ui.sweepButton.setEnabled(False)
if not k2450.doSweep(self.mainWindow.sm):
print ("Failed to do forward sweep.")
else:
# get the forward data
[i,v] = k2450.fetchSweepData(self.mainWindow.sm,self.mainWindow.sweepParams)
self.mainWindow.ax1.clear()
if i is not None:
self.mainWindow.ax1.set_title('Forward Sweep Results',loc="right")
rs.plotSweep(i,v,self.mainWindow.ax1) # plot the sweep results
else:
print("Failed to fetch forward sweep data.")
# now do a reverse sweep
reverseParams = self.mainWindow.sweepParams.copy()
reverseParams['sweepStart'] = self.mainWindow.sweepParams['sweepEnd']
reverseParams['sweepEnd'] = self.mainWindow.sweepParams['sweepStart']
confRes = k2450.configureSweep(self.mainWindow.sm,reverseParams)
if confRes:
time.sleep(5)
if not confRes:
print ("Failed to configure reverse sweep.")
elif not k2450.doSweep(self.mainWindow.sm):
print ("Failed to do reverse sweep.")
else:
# get the reverse data
[i,v] = k2450.fetchSweepData(self.mainWindow.sm,reverseParams)
self.mainWindow.ax2.clear()
if i is not None:
self.mainWindow.ax2.set_title('Reverse Sweep Results',loc="right")
rs.plotSweep(i,v,self.mainWindow.ax2) # plot the sweep results
else:
print("Failed to fetch reverse sweep data.")
print('======================================')
self.mainWindow.ui.applyButton.setEnabled(True)
self.mainWindow.ui.sweepButton.setEnabled(True)
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
self.setup = False # to keep track of if the sourcemeter is setup or not
self.configured = False # to keep track of if the sweep is configured or not
# Set up the user interface from Designer
self.ui = pyqtGen.Ui_MainWindow()
self.ui.setupUi(self)
#recall settings
self.settings = QtCore.QSettings("greyltc", "rs-tool-gui")
for thisKey in self.settings.allKeys():
if hasattr(self.ui,thisKey):
targetObject = getattr(self.ui,thisKey)
if type(targetObject) is QtWidgets.QLineEdit:
targetObject.setText(self.settings.value(thisKey))
elif (type(targetObject) is QtWidgets.QCheckBox):
targetObject.setCheckState(self.settings.value(thisKey,type=int))
elif (type(targetObject) is QtWidgets.QSpinBox):
targetObject.setValue(self.settings.value(thisKey,type=int))
elif type(targetObject) is QtWidgets.QDoubleSpinBox:
targetObject.setValue(self.settings.value(thisKey,type=float))
else:
print('Unexpected object type while loading settings.')
self.ui.stepDelayDoubleSpinBox.setEnabled(not self.ui.autoDelayCheckBox.isChecked())
# tell the UI where to draw put matplotlib plots
fig = plt.figure(facecolor="white")
self.ax1 = fig.add_subplot(2,1,1)
self.ax2 = fig.add_subplot(2,1,2)
vBox = QtWidgets.QVBoxLayout()
vBox.addWidget(FigureCanvas(fig))
self.ui.plotTab.setLayout(vBox)
# set up things for our log pane
global myPrinter
myPrinter = MyPrinter()
myPrinter.writeToLog.connect(self.ui.textBrowser.insertPlainText)
myPrinter.scrollLog.connect(self.scrollLog)
self.ui.textBrowser.setTextBackgroundColor(QtGui.QColor('black'))
self.ui.textBrowser.setTextColor(QtGui.QColor(0, 255, 0))
#self.ui.textBrowser.setFontWeight(QtGui.QFont.Bold)
self.ui.textBrowser.setAutoFillBackground(True)
p = self.ui.textBrowser.palette()
p.setBrush(9, QtGui.QColor('black'))
#p.setColor(self.ui.textBrowser.backgroundRole, QtGui.QColor('black'))
self.ui.textBrowser.setPalette(p)
# for now put these here, should be initiated by user later:
self.rm = visa.ResourceManager('@py') # select pyvisa-py (pure python) backend
# connect up the sweep button
self.ui.sweepButton.clicked.connect(self.doSweep)
# connect up the connect button
self.ui.connectButton.clicked.connect(self.connectToKeithley)
# connect up the apply button
self.ui.applyButton.clicked.connect(self.applySweepValues)
# save any changes the user makes
self.ui.visaAddressLineEdit.editingFinished.connect(self.aSettingHasChanged)
self.ui.terminationLineEdit.editingFinished.connect(self.aSettingHasChanged)
self.ui.timeoutSpinBox.valueChanged.connect(self.aSettingHasChanged)
self.ui.startVoltageDoubleSpinBox.valueChanged.connect(self.aSettingHasChanged)
self.ui.endVoltageDoubleSpinBox.valueChanged.connect(self.aSettingHasChanged)
self.ui.numberOfStepsSpinBox.valueChanged.connect(self.aSettingHasChanged)
self.ui.currentLimitDoubleSpinBox.valueChanged.connect(self.aSettingHasChanged)
self.ui.nPLCDoubleSpinBox.valueChanged.connect(self.aSettingHasChanged)
self.ui.stepDelayDoubleSpinBox.valueChanged.connect(self.aSettingHasChanged)
self.ui.autoDelayCheckBox.stateChanged.connect(self.aSettingHasChanged)
self.ui.autoDelayCheckBox.stateChanged.connect(self.autoDelayStateChange)
self.ui.autoZeroCheckBox.stateChanged.connect(self.aSettingHasChanged)
self.sweepThread = sweepThread(self)
def __del__(self):
try:
print("Closing connection to", self.sm._logging_extra['resource_name'],"...")
self.sm.close() # close connection
print("Connection closed.")
except:
return
def aSettingHasChanged(self, newValue=None):
self.configured = False
sourceWidget = self.sender()
if (type(sourceWidget) is QtWidgets.QLineEdit) and (newValue is None): # this ensures we're not saving a line edit on every new char
self.settings.setValue(sourceWidget.objectName(),sourceWidget.text())
else:
self.settings.setValue(sourceWidget.objectName(),newValue) # save the key-value pair to our settings
def autoDelayStateChange(self):
isChecked = self.ui.autoDelayCheckBox.isChecked()
self.settings.setValue('autoDelay',isChecked)
self.ui.stepDelayDoubleSpinBox.setEnabled(not isChecked)
def applySweepValues(self):
#TODO: somehow detect that a user has changed a sweep parameter in the UI and they need to be resent to the sourcemeter
if not self.setup:
print("The sourcemeter has not been set up. We'll try that now.")
self.connectToKeithley()
if self.setup:
self.sweepParams = {} # here we'll store the parameters that define our sweep
#self.sweepParams['maxCurrent'] = 0.05 # amps
#self.sweepParams['sweepStart'] = -0.003 # volts
#self.sweepParams['sweepEnd'] = 0.003 # volts
#self.sweepParams['nPoints'] = 101
#self.sweepParams['stepDelay'] = -1 # seconds (-1 for auto, nearly zero, delay)
self.sweepParams['maxCurrent'] = self.ui.currentLimitDoubleSpinBox.value()/1000 # amps
self.sweepParams['sweepStart'] = self.ui.startVoltageDoubleSpinBox.value()/1000 # volts
self.sweepParams['sweepEnd'] = self.ui.endVoltageDoubleSpinBox.value()/1000 # volts
self.sweepParams['nPoints'] = self.ui.numberOfStepsSpinBox.value()
self.sweepParams['stepDelay'] = self.ui.stepDelayDoubleSpinBox.value()/1000
self.sweepParams['sourceFun'] = 'voltage'
self.sweepParams['senseFun'] = 'current'
self.sweepParams['fourWire'] = True
      self.sweepParams['nplc'] = self.ui.nPLCDoubleSpinBox.value() # integration time (in number of power line cycles)
if self.ui.autoZeroCheckBox.isChecked():
self.sweepParams['autoZero'] = True
else:
self.sweepParams['autoZero'] = False
if self.ui.autoDelayCheckBox.isChecked():
self.sweepParams['stepDelay'] = -1 # seconds (-1 for auto, nearly zero, delay)
self.sweepParams['durationEstimate'] = k2450.estimateSweepTimeout(self.sweepParams['nPoints'], self.sweepParams['stepDelay'],self.sweepParams['nplc'])
self.configured = k2450.configureSweep(self.sm,self.sweepParams)
if self.configured:
print('Sweep parameters applied.')
else:
print('Sweep parameters not applied.')
def connectToKeithley(self):
# ====for TCPIP comms====
#instrumentIP = ipaddress.ip_address('10.42.0.60') # IP address of sourcemeter
#fullAddress = 'TCPIP::'+str(instrumentIP)+'::INSTR'
#deviceTimeout = 1000 # ms
#fullAddress = 'TCPIP::'+str(instrumentIP)+'::5025::SOCKET' # for raw TCPIP comms directly through a socket @ port 5025 (probably worse than INSTR)
#openParams = {'resource_name':fullAddress, 'timeout': deviceTimeout}
# ====for serial rs232 comms=====
#serialPort = "/dev/ttyUSB0"
#fullAddress = "ASRL"+serialPort+"::INSTR"
#deviceTimeout = 1000 # ms
#sm = rm.open_resource(smAddress)
#sm.set_visa_attribute(visa.constants.VI_ATTR_ASRL_BAUD,57600)
#sm.set_visa_attribute(visa.constants.VI_ASRL_END_TERMCHAR,u'\r')
#openParams = {'resource_name':fullAddress, 'timeout': deviceTimeout}
if (not self.setup):
      self.openParams = {
        'resource_name': self.ui.visaAddressLineEdit.text(),
        'timeout': self.ui.timeoutSpinBox.value(),
        '_read_termination': self.ui.terminationLineEdit.text().replace("\\n", '\n').replace("\\t", '\t').replace("\\r", '\r')
      }
self.sm = k2450.visaConnect(self.rm, self.openParams)
if self.sm is not None:
result = k2450.setup2450(self.sm)
if result is True:
self.setup = True
else:
print('Already connected.')
def scrollLog(self): # scrolls log to maximum position
self.ui.textBrowser.verticalScrollBar().setValue(self.ui.textBrowser.verticalScrollBar().maximum())
def doSweep(self):
if not self.configured:
print("The sweep has not been configured. We'll try that now.")
self.applySweepValues()
if self.configured:
print("Waiting 5 seconds before we start the sweep...")
time.sleep(5)
self.sweepThread.start()
#self.ui.tehTabs.setCurrentIndex(0) # switch to plot tab
def main():
app = QtWidgets.QApplication(sys.argv)
sweepUI = MainWindow()
sweepUI.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| AFMD/rs-tool | rs-tool-gui.py | Python | apache-2.0 | 12,191 |
# coding=utf-8
from collections import namedtuple
__all__ = ['Struct', 'PayAppInternalResult', 'PayType', 'PayState', ]
class Struct(object):
def __init__(self, **entries):
self.__dict__.update(entries)
def __repr__(self):
return str(self.__dict__)
def __contains__(self, item):
return item in self.__dict__
class PayAppInternalResult(namedtuple('PayAppInternalResult',
['success', 'error', 'content'])):
pass
class PayType(object):
TYPES = (None, u'신용카드', u'휴대전화', u'해외결제', u'대면결제', None,
u'계좌이체', u'가상계좌', None, u'문화상품권')
def __init__(self, type_id):
if type(type_id) is not int:
type_id = int(type_id)
self.type_id = type_id
def __int__(self):
return self.type_id
def __str__(self):
type_ = PayType.TYPES[self.type_id]
if type_ is None:
return ''
else:
return type_
def __repr__(self):
return str(self.type_id)
class PayState(object):
STATES = (
([1], u'요청', 'req'), ([4], u'결제완료', 'approved'), ([8, 16, 32], u'요청취소', 'cancel'),
([9, 64], u'승인취소', 'cancel-approval'), ([10], u'결제대기', 'waiting')
)
def __init__(self, state_id):
if type(state_id) is not int:
state_id = int(state_id)
self.state_id = state_id
self.state_name = next(state[1] for state in PayState.STATES if state_id in state[0])
self.stat = next(state[2] for state in PayState.STATES if state_id in state[0])
@property
def code(self):
return self.stat
def __int__(self):
return self.state_id
def __str__(self):
return self.state_name
def __repr__(self):
return str(self.state_id)
| ssut/payapp | payapp/classes.py | Python | mit | 1,877 |
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import unittest
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.engine.internals.parser import BuildFilePreludeSymbols, SymbolTable
from pants.engine.legacy.parser import LegacyPythonCallbacksParser
from pants.option.global_options import BuildFileImportsBehavior
from pants.util.frozendict import FrozenDict
class LegacyPythonCallbacksParserTest(unittest.TestCase):
def test_no_import_sideeffects(self) -> None:
# A parser with no symbols registered.
parser = LegacyPythonCallbacksParser(
SymbolTable({}),
BuildFileAliases(),
build_file_imports_behavior=BuildFileImportsBehavior.warn,
)
# Call to import a module should succeed.
parser.parse(
"/dev/null",
b"""import os; os.path.join('x', 'y')""",
BuildFilePreludeSymbols(FrozenDict()),
)
# But the imported module should not be visible as a symbol in further parses.
with self.assertRaises(NameError):
parser.parse(
"/dev/null", b"""os.path.join('x', 'y')""", BuildFilePreludeSymbols(FrozenDict())
)
| tdyas/pants | src/python/pants/engine/legacy/parser_test.py | Python | apache-2.0 | 1,296 |
"""
basics.py
Contains the definitions of basic functions used for various purposes
in the game.
Written by: Mohsin Rizvi
Last edited: 07/12/17
"""
import sys
import os
# Purpose: Prints the keys followed by the values of a given dictionary,
#       where the values are command description strings.
# Parameters: A dictionary mapping command names to description strings.
# Return: Void
def printPairs(comms):
# Print all key-value pairs in the dictionary
for i in comms:
print(" " + i + " - " + comms[i])
# Purpose: Gets a string input from standard input and checks it against
# all keys in the given dictionary. Returns key that
# input matches. Prompts user until input is valid.
# Parameters: A dictionary of commands to look through, where both keys and
#             values are strings.
# Return: The key that input matches.
def command(comms):
inv = True
# Get a command from standard input
entered = input().strip()
# Checks validity of input against each possible command
while True:
# Check validity of input
for i in comms:
if entered.lower() == i.lower():
return i
# Print possible commands if requested
if entered == "$":
printPairs(comms)
inv = False
# The rest of the while loop runs if input is invalid
if inv:
print("-Invalid command. Please enter a new one. Enter \"$\"" +
" for a list of commands.")
inv = True
entered = input().strip()
# Purpose: Appends a new line to the end of the file with the given
# filename if one is not already present.
# Parameters: A filename of a file in this script's directory to append
# a new line onto.
# Return: Void
def fixData(filename):
# Credit given to Stack Overflow for helping me figure out how to get
# the filepath of the current script's directory. More info in
# README.md.
# Get the path of the current script's directory
path = os.path.dirname(os.path.realpath(__file__))
# Change the working directory to this script's directory
os.chdir(path)
# Open the file for reading and writing.
with open(filename, "r+") as fixer:
x = fixer.read()
# Check last character, add a new line char if one is not present
if len(x) > 0:
if x[len(x) - 1] != "\n":
fixer.write("\n")
else:
fixer.write("\n")
# Purpose: Quits the game.
# Parameters: None
# Return: Void
def quit():
print("Thanks for playing!")
sys.exit()
# Purpose: Prints the given error message and quits the game.
# Parameters: A string error message to print.
# Return: Void
def error_quit(message):
sys.exit(message)
| mohsr/print-adventure | src/basics.py | Python | mit | 2,836 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import json
HONEYPOT_CHANGES_PERCENTAGE = 11
def files_check():
try:
file1 = open(sys.argv[1], "rb").read()
except Exception as _:
sys.exit(print("cannot open the file, {0}".format(sys.argv[1])))
try:
file2 = open(sys.argv[2], "rb").read()
except Exception as _:
sys.exit(print("cannot open the file, {0}".format(sys.argv[2])))
return [json.loads(file1), json.loads(file2)]
def percentage(data1, data2):
m = 0
n = 0
for r in data1.rsplit():
try:
if r == data2.rsplit()[m]:
n += 1
except:
n += 1
m += 1
return float(100 / float(float(len(data1.rsplit())) / int(len(data1.rsplit()) - n)))
if __name__ == "__main__":
if len(sys.argv) != 3:
sys.exit(print("usage: python {0} file1.json file2.json".format(sys.argv[0])))
file1, file2 = files_check()
for target_selected in file1:
NOT_FIND_FLAG = True
for target_find in file2:
if target_selected["host"] == target_find["host"]:
PERCENTAGE = percentage(target_selected["I20100_RESPONSE"], target_find["I20100_RESPONSE"])
print("HOST:{0}\tCHANGE PERCENTAGE:{1}%\tDEFAULT CONFIG:{2}\tI30100 TRAP:{3}".format(
target_selected["host"], PERCENTAGE, target_selected["DEFAULT_SIGNATURES"] or
target_selected["DEFAULT_PRODUCTS"],
target_selected["\x01I30100\n"]))
| Nettacker/Nettacker | lib/payload/scanner/ics_honeypot/changes_percentage.py | Python | gpl-3.0 | 1,608 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2007 Lukáš Lalinský
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import wave
from picard import log
from picard.file import File
from picard.metadata import Metadata
from picard.util import encode_filename
class WAVFile(File):
EXTENSIONS = [".wav"]
NAME = "Microsoft WAVE"
def _load(self, filename):
log.debug("Loading file %r", filename)
f = wave.open(encode_filename(filename), "rb")
metadata = Metadata()
metadata['~channels'] = f.getnchannels()
metadata['~bits_per_sample'] = f.getsampwidth() * 8
metadata['~sample_rate'] = f.getframerate()
metadata.length = 1000 * f.getnframes() / f.getframerate()
metadata['~format'] = 'Microsoft WAVE'
self._add_path_to_metadata(metadata)
return metadata
def _save(self, filename, metadata):
log.debug("Saving file %r", filename)
pass
| dufferzafar/picard | picard/formats/wav.py | Python | gpl-2.0 | 1,649 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import inspect
import json
import six
from django.conf import settings
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import (
get_callable, NoReverseMatch, RegexURLPattern, reverse
)
from django.http.response import HttpResponseForbidden
from django.utils.encoding import force_str, force_text
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _
from shuup.admin.module_registry import get_modules
from shuup.admin.utils.permissions import (
get_default_model_permissions, get_missing_permissions
)
from shuup.utils.excs import Problem
try:
from urllib.parse import parse_qsl
except ImportError: # pragma: no cover
from urlparse import parse_qsl # Python 2.7
class AdminRegexURLPattern(RegexURLPattern):
def __init__(self, regex, callback, default_args=None, name=None, require_authentication=True, permissions=()):
self.permissions = tuple(permissions)
self.require_authentication = require_authentication
if callable(callback):
callback = self.wrap_with_permissions(callback)
super(AdminRegexURLPattern, self).__init__(regex, callback, default_args, name)
def _get_unauth_response(self, request, reason):
"""
Get an error response (or raise a Problem) for a given request and reason message.
        :type request: HttpRequest
        :param request: The request being handled.
        :type reason: str
        :param reason: The reason message to show to the user.
"""
if request.is_ajax():
return HttpResponseForbidden(json.dumps({"error": force_text(reason)}))
error_params = urlencode({"error": reason})
login_url = force_str(reverse("shuup_admin:login") + "?" + error_params)
resp = redirect_to_login(next=request.path, login_url=login_url)
if request.user.is_authenticated():
# Instead of redirecting to the login page, let the user know what's wrong with
# a helpful link.
raise (
Problem(_("Can't view this page. %(reason)s") % {"reason": reason})
.with_link(url=resp.url, title=_("Log in with different credentials..."))
)
return resp
def _get_unauth_reason(self, request):
"""
Figure out if there's any reason not to allow the user access to this view via the given request.
:type request: Request
:param request: HttpRequest
:rtype: str|None
"""
if self.require_authentication:
if not request.user.is_authenticated():
return _("You must be logged in.")
elif not getattr(request.user, 'is_staff', False):
return _("You must be a staff member.")
missing_permissions = get_missing_permissions(request.user, self.permissions)
if missing_permissions:
return _("You do not have the required permissions: %s") % ", ".join(missing_permissions)
def wrap_with_permissions(self, view_func):
if callable(getattr(view_func, "as_view", None)):
view_func = view_func.as_view()
@six.wraps(view_func)
def _wrapped_view(request, *args, **kwargs):
unauth_reason = self._get_unauth_reason(request)
if unauth_reason:
return self._get_unauth_response(request, unauth_reason)
return view_func(request, *args, **kwargs)
return _wrapped_view
@property
def callback(self):
if self._callback is not None:
return self._callback
callback = get_callable(self._callback_str)
self._callback = self.wrap_with_permissions(callback)
return self._callback
def admin_url(regex, view, kwargs=None, name=None, prefix='', require_authentication=True, permissions=()):
if isinstance(view, six.string_types):
if not view:
raise ImproperlyConfigured('Empty URL pattern view name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return AdminRegexURLPattern(
regex, view, kwargs, name,
require_authentication=require_authentication,
permissions=permissions
)
def get_edit_and_list_urls(url_prefix, view_template, name_template, permissions=()):
"""
Get a list of edit/new/list URLs for (presumably) an object type with standardized URLs and names.
:param url_prefix: What to prefix the generated URLs with. E.g. `"^taxes/tax"`
:type url_prefix: str
:param view_template: A template string for the dotted name of the view class.
E.g. "shuup.admin.modules.taxes.views.Tax%sView"
:type view_template: str
:param name_template: A template string for the URLnames. E.g. "tax.%s"
:type name_template: str
:return: List of URLs
:rtype: list[AdminRegexURLPattern]
"""
return [
admin_url(
"%s/(?P<pk>\d+)/$" % url_prefix,
view_template % "Edit",
name=name_template % "edit",
permissions=permissions
),
admin_url(
"%s/new/$" % url_prefix,
view_template % "Edit",
name=name_template % "new",
kwargs={"pk": None},
permissions=permissions
),
admin_url(
"%s/$" % url_prefix,
view_template % "List",
name=name_template % "list",
permissions=permissions
),
admin_url(
"%s/list-settings/" % url_prefix,
"shuup.admin.modules.settings.views.ListSettingsView",
name=name_template % "list_settings",
permissions=permissions,
)
]
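# Illustrative usage sketch (hypothetical caller code reusing the example values from
# the docstring above; it is not a verbatim excerpt from an existing shuup module):
#     urlpatterns = get_edit_and_list_urls(
#         url_prefix="^taxes/tax",
#         view_template="shuup.admin.modules.taxes.views.Tax%sView",
#         name_template="tax.%s",
#     )
# This yields the named patterns "tax.edit", "tax.new", "tax.list" and "tax.list_settings".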
class NoModelUrl(ValueError):
pass
def get_model_url(object, kind="detail", user=None, required_permissions=None):
"""
    Get an admin object URL for the given object or object class by
interrogating each admin module.
If a user is provided, checks whether user has correct permissions
before returning URL.
Raises `NoModelUrl` if lookup fails
:param object: Model or object class.
:type object: class
:param kind: URL kind. Currently "new", "list", "edit", "detail".
:type kind: str
:param user: Optional instance to check for permissions
:type user: django.contrib.auth.models.User|None
:param required_permissions: Optional iterable of permission strings
:type required_permissions: Iterable[str]|None
:return: Resolved URL.
:rtype: str
"""
for module in get_modules():
url = module.get_model_url(object, kind)
if not url:
continue
if user is None:
return url
else:
permissions = ()
if required_permissions is not None:
permissions = required_permissions
else:
# TODO: Check permission type based on kind
permissions = get_default_model_permissions(object)
if not get_missing_permissions(user, permissions):
return url
raise NoModelUrl("Can't get object URL of kind %s: %r" % (kind, force_text(object)))
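# Illustrative usage sketch (hypothetical, for clarity only; `product` and `request`
# are assumed objects that are not defined in this module):
#     try:
#         url = get_model_url(product, kind="edit", user=request.user)
#     except NoModelUrl:
#         url = None
# Passing a user limits the lookup to URLs that user is actually permitted to access.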
def derive_model_url(model_class, urlname_prefix, object, kind):
"""
Try to guess a model URL for the given `object` and `kind`.
    A utility for people implementing `get_model_url`.
:param model_class: The model class the object must be an instance or subclass of.
:type model_class: class
:param urlname_prefix: URLname prefix. For instance, `shuup_admin:product.`
:type urlname_prefix: str
:param object: The model or model class as passed to `get_model_url`
:type object: django.db.models.Model|class
:param kind: URL kind as passed to `get_model_url`.
:type kind: str
:return: Resolved URL or None.
:rtype: str|None
"""
if not (isinstance(object, model_class) or (inspect.isclass(object) and issubclass(object, model_class))):
return
kind_to_urlnames = {
"detail": ("%s.detail" % urlname_prefix, "%s.edit" % urlname_prefix),
}
kwarg_sets = [{}]
if getattr(object, "pk", None):
kwarg_sets.append({"pk": object.pk})
for urlname in kind_to_urlnames.get(kind, ["%s.%s" % (urlname_prefix, kind)]):
for kwargs in kwarg_sets:
try:
return reverse(urlname, kwargs=kwargs)
except NoReverseMatch:
pass
# No match whatsoever.
return None
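# Illustrative sketch of an admin module using the helper above (hypothetical code;
# the Product model and the "shuup_admin:product" prefix are assumptions for the example):
#     def get_model_url(self, object, kind):
#         return derive_model_url(Product, "shuup_admin:product", object, kind)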
def manipulate_query_string(url, **qs):
if "?" in url:
url, current_qs = url.split("?", 1)
qs = dict(parse_qsl(current_qs), **qs)
qs = [(key, value) for (key, value) in qs.items() if value is not None]
if qs:
return "%s?%s" % (url, urlencode(qs))
else:
return url
def get_model_front_url(request, object):
"""
Get a frontend URL for an object.
:param request: Request
:type request: HttpRequest
:param object: A model instance
:type object: django.db.models.Model
:return: URL or None
:rtype: str|None
"""
# TODO: This method could use an extension point for alternative frontends.
if not object.pk:
return None
if "shuup.front" in settings.INSTALLED_APPS:
# Best effort to use the default frontend for front URLs.
try:
from shuup.front.template_helpers.urls import model_url
return model_url({"request": request}, object)
except (ValueError, NoReverseMatch):
pass
return None
| suutari/shoop | shuup/admin/utils/urls.py | Python | agpl-3.0 | 9,892 |
#!/usr/bin/env python
#******************************************************************************
# $Id: gdalchksum.py 18952 2010-02-28 11:59:53Z rouault $
#
# Project: GDAL
# Purpose: Application to checksum a GDAL image file.
# Author: Frank Warmerdam, [email protected]
#
#******************************************************************************
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#******************************************************************************
try:
from osgeo import gdal
except ImportError:
import gdal
import sys
def Usage():
print('Usage: gdalchksum.py [-b band] [-srcwin xoff yoff xsize ysize] file')
sys.exit(1)
# =============================================================================
# Mainline
# =============================================================================
srcwin = None
bands = []
filename = None
gdal.AllRegister()
argv = gdal.GeneralCmdLineProcessor( sys.argv )
if argv is None:
sys.exit( 0 )
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-b':
i = i + 1
bands.append( int(argv[i]) )
elif arg == '-srcwin':
srcwin = [int(argv[i+1]),int(argv[i+2]),
                  int(argv[i+3]),int(argv[i+4]) ]
i = i + 4
elif filename is None:
filename = argv[i]
else:
Usage()
i = i + 1
if filename is None:
Usage()
# Open source file
ds = gdal.Open( filename )
if ds is None:
print('Unable to open %s' % filename)
sys.exit(1)
# Default values
if srcwin is None:
srcwin = [ 0, 0, ds.RasterXSize, ds.RasterYSize ]
if len(bands) == 0:
bands = list(range(1,(ds.RasterCount+1)))
# Generate checksums
for band_num in bands:
oBand = ds.GetRasterBand( band_num )
result = oBand.Checksum( srcwin[0], srcwin[1], srcwin[2], srcwin[3] )
print(result)
ds = None
| kctan0805/vdpm | share/gdal/gdal-2.0.0/swig/python/scripts/gdalchksum.py | Python | lgpl-2.1 | 3,011 |
__version__ = (0, 4, 0, 'dev', 0)
def get_version():
version = '%d.%d.%d' % __version__[0:3]
if __version__[3]:
version = '%s-%s%s' % (version, __version__[3],
(__version__[4] and str(__version__[4])) or '')
return version
| jszakmeister/trac-backlog | backlog/__init__.py | Python | bsd-3-clause | 271 |
"""
:synopsis: Unit Tests for Windows iis Module 'state.win_iis'
:platform: Windows
.. versionadded:: 2019.2.2
"""
import pytest
import salt.states.win_iis as win_iis
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {win_iis: {}}
def __base_webconfiguration_ret(comment="", changes=None, name="", result=None):
return {
"name": name,
"changes": changes if changes else {},
"comment": comment,
"result": result,
}
def test_webconfiguration_settings_no_settings():
name = "IIS"
settings = {}
expected_ret = __base_webconfiguration_ret(
name=name, comment="No settings to change provided.", result=True
)
actual_ret = win_iis.webconfiguration_settings(name, settings)
assert expected_ret == actual_ret
def test_webconfiguration_settings_collection_failure():
name = "IIS:\\"
settings = {
"system.applicationHost/sites": {
"Collection[{name: site0}].logFile.directory": "C:\\logs\\iis\\site0",
},
}
old_settings = [
{
"filter": "system.applicationHost/sites",
"name": "Collection[{name: site0}].logFile.directory",
"value": "C:\\logs\\iis\\old_site",
}
]
current_settings = old_settings
new_settings = old_settings
expected_ret = __base_webconfiguration_ret(
name=name,
result=False,
changes={
"changes": {
old_settings[0]["filter"]
+ "."
+ old_settings[0]["name"]: {
"old": old_settings[0]["value"],
"new": settings[old_settings[0]["filter"]][old_settings[0]["name"]],
}
},
"failures": {
old_settings[0]["filter"]
+ "."
+ old_settings[0]["name"]: {
"old": old_settings[0]["value"],
"new": new_settings[0]["value"],
}
},
},
comment="Some settings failed to change.",
)
with patch.dict(
win_iis.__salt__,
{
"win_iis.get_webconfiguration_settings": MagicMock(
side_effect=[old_settings, current_settings, new_settings]
),
"win_iis.set_webconfiguration_settings": MagicMock(return_value=True),
},
), patch.dict(win_iis.__opts__, {"test": False}):
actual_ret = win_iis.webconfiguration_settings(name, settings)
assert expected_ret == actual_ret
def test_webconfiguration_settings_collection():
name = "IIS:\\"
settings = {
"system.applicationHost/sites": {
"Collection[{name: site0}].logFile.directory": "C:\\logs\\iis\\site0",
},
}
old_settings = [
{
"filter": "system.applicationHost/sites",
"name": "Collection[{name: site0}].logFile.directory",
"value": "C:\\logs\\iis\\old_site",
}
]
current_settings = [
{
"filter": "system.applicationHost/sites",
"name": "Collection[{name: site0}].logFile.directory",
"value": "C:\\logs\\iis\\site0",
}
]
new_settings = current_settings
expected_ret = __base_webconfiguration_ret(
name=name,
result=True,
changes={
old_settings[0]["filter"]
+ "."
+ old_settings[0]["name"]: {
"old": old_settings[0]["value"],
"new": new_settings[0]["value"],
}
},
comment="Set settings to contain the provided values.",
)
with patch.dict(
win_iis.__salt__,
{
"win_iis.get_webconfiguration_settings": MagicMock(
side_effect=[old_settings, current_settings, new_settings]
),
"win_iis.set_webconfiguration_settings": MagicMock(return_value=True),
},
), patch.dict(win_iis.__opts__, {"test": False}):
actual_ret = win_iis.webconfiguration_settings(name, settings)
assert expected_ret == actual_ret
def test_container_settings_password_redacted():
name = "IIS:\\"
container = "AppPools"
settings = {
"processModel.userName": "Administrator",
"processModel.password": "Sup3rS3cr3tP@ssW0rd",
"processModel.identityType": "SpecificUser",
}
old_settings = {
"processModel.userName": "Administrator",
"processModel.password": "0ldP@ssW0rd1!",
"processModel.identityType": "SpecificUser",
}
current_settings = {
"processModel.userName": "Administrator",
"processModel.password": "Sup3rS3cr3tP@ssW0rd",
"processModel.identityType": "SpecificUser",
}
new_settings = current_settings
expected_ret = {
"name": name,
"changes": {
"processModel.password": {
"new": "XXX-REDACTED-XXX",
"old": "XXX-REDACTED-XXX",
}
},
"comment": "Set settings to contain the provided values.",
"result": True,
}
with patch.dict(
win_iis.__salt__,
{
"win_iis.get_container_setting": MagicMock(
side_effect=[old_settings, current_settings, new_settings]
),
"win_iis.set_container_setting": MagicMock(return_value=True),
},
), patch.dict(win_iis.__opts__, {"test": False}):
actual_ret = win_iis.container_setting(
name=name, container=container, settings=settings
)
assert expected_ret == actual_ret
def test_container_settings_password_redacted_test_true():
name = "IIS:\\"
container = "AppPools"
settings = {
"processModel.userName": "Administrator",
"processModel.password": "Sup3rS3cr3tP@ssW0rd",
"processModel.identityType": "SpecificUser",
}
old_settings = {
"processModel.userName": "Administrator",
"processModel.password": "0ldP@ssW0rd1!",
"processModel.identityType": "SpecificUser",
}
current_settings = {
"processModel.userName": "Administrator",
"processModel.password": "Sup3rS3cr3tP@ssW0rd",
"processModel.identityType": "SpecificUser",
}
new_settings = current_settings
expected_ret = {
"name": name,
"changes": {
"processModel.password": {
"new": "XXX-REDACTED-XXX",
"old": "XXX-REDACTED-XXX",
}
},
"comment": "Settings will be changed.",
"result": None,
}
with patch.dict(
win_iis.__salt__,
{
"win_iis.get_container_setting": MagicMock(
side_effect=[old_settings, current_settings, new_settings]
),
"win_iis.set_container_setting": MagicMock(return_value=True),
},
), patch.dict(win_iis.__opts__, {"test": True}):
actual_ret = win_iis.container_setting(
name=name, container=container, settings=settings
)
assert expected_ret == actual_ret
def test_container_settings_password_redacted_failures():
name = "IIS:\\"
container = "AppPools"
settings = {
"processModel.userName": "Administrator",
"processModel.password": "Sup3rS3cr3tP@ssW0rd",
"processModel.identityType": "SpecificUser",
}
old_settings = {
"processModel.userName": "Spongebob",
"processModel.password": "0ldP@ssW0rd1!",
"processModel.identityType": "SpecificUser",
}
current_settings = {
"processModel.userName": "Administrator",
"processModel.password": "0ldP@ssW0rd1!",
"processModel.identityType": "SpecificUser",
}
new_settings = old_settings
expected_ret = {
"name": name,
"changes": {
"changes": {
"processModel.userName": {"new": "Administrator", "old": "Spongebob"}
},
"failures": {
"processModel.password": {
"new": "XXX-REDACTED-XXX",
"old": "XXX-REDACTED-XXX",
}
},
},
"comment": "Some settings failed to change.",
"result": False,
}
with patch.dict(
win_iis.__salt__,
{
"win_iis.get_container_setting": MagicMock(
side_effect=[old_settings, current_settings, new_settings]
),
"win_iis.set_container_setting": MagicMock(return_value=True),
},
), patch.dict(win_iis.__opts__, {"test": False}):
actual_ret = win_iis.container_setting(
name=name, container=container, settings=settings
)
assert expected_ret == actual_ret
| saltstack/salt | tests/pytests/unit/states/test_win_iis.py | Python | apache-2.0 | 8,811 |
# yaranullin/tests/event_system.py
#
# Copyright (c) 2012 Marco Scopesi <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import unittest
import collections
if __name__ == '__main__':
sys.path.insert(0, ".")
from yaranullin.weakcallback import WeakCallback
from yaranullin.event_system import connect, disconnect, post, _EVENTS, \
_QUEUE, process_queue
Q = collections.deque()
def func_handler(ev):
''' Simple function handler '''
del ev['id']
del ev['event']
Q.append(ev)
class Handler(object):
''' Class to test method handler '''
def method_handler(self):
''' Simple method handler '''
class TestEvents(unittest.TestCase):
def setUp(self):
_QUEUE.clear()
_EVENTS.clear()
Q.clear()
def test_connect(self):
# Connect a single handler
connect('test', func_handler)
wrapper = WeakCallback(func_handler)
self.assertTrue('test' in _EVENTS)
self.assertTrue(wrapper in _EVENTS['test'])
def test_disconnect(self):
# Disconnect a single handler
connect('test', func_handler)
disconnect('test', func_handler)
wrapper = WeakCallback(func_handler)
self.assertFalse(wrapper in _EVENTS['test'])
def test_post(self):
# Post a single event
connect('test', func_handler)
event_dict = {'event': 'test', 'id': post('test')}
self.failUnlessEqual(event_dict, _QUEUE.popleft())
def test_process_queue(self):
connect('test', func_handler)
event_dict = {'args1': 1, 'arg2':2}
post('test', **event_dict)
post('test', event_dict)
process_queue()
self.failUnlessEqual(event_dict, Q.popleft())
self.failUnlessEqual(event_dict, Q.popleft())
if __name__ == '__main__':
unittest.main()
| ciappi/Yaranullin | yaranullin/tests/event_system.py | Python | isc | 2,539 |
# Developer and idea: Lasse Steenbock Vestergaard ([email protected])
#
# The software is distributed under the MIT license
#
# Copyright (C) 2014 The Alexandra Institute A/S www.alexandra.dk/uk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from tornado.web import MissingArgumentError
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import json
import uuid
clients={}
class WSHandler(tornado.websocket.WebSocketHandler):
#Register new connection
def open(self):
try:
#Initial setup of the entity
self.id=self.get_argument('id', uuid.uuid1().urn[9:]) if self.get_argument('id', uuid.uuid1()) != "" else uuid.uuid1().urn[9:]
self.type=self.get_argument('type','default')
self.observers=[]
self.methods=[]
self.subscribeToRegistrations=False
self.stream.set_nodelay(True)
#Test if id exists
if not clients.has_key(self.id):
for elem in clients.keys():
if(clients[elem].subscribeToRegistrations):
clients[elem].write_message('{"register":"'+self.id+'"}')
clients[self.id]=self
else:
                raise ValueError('Existing id')
#Close connection if id is not supplied or already existing
except MissingArgumentError:
self.write_message('{"error":"Id is not supplied. Please add an Id."}')
self.close()
        except ValueError:
            self.write_message('{"error":"Id is taken. Try with another Id."}')
self.close()
#Reads the recieved message and sends it to specific recipient or all observers
def on_message(self, message):
print message
message=json.loads(message)
print message
try:
getattr(self, message["request"])(message)
except AttributeError:
self.write_message('{"error":"Method with the provided arguments does not exist."}')
#Close connection and remove from observer lists
def on_close(self):
#Notify all observers that this entity is shutting down
for elem in self.observers:
clients[elem].write_message('{"unregister":"'+self.id+'"}')
#Remove this entity from all observed entities lists
for elem in clients.keys():
for el in clients[elem].observers:
if(el==self.id):
self.unsubscribe_from(elem)
break
#Remove from clients list
clients.pop(self.id, None)
#######################################
# #
# Below are custom server functions #
# #
#######################################
#Scenario 1 transactions
#clientX | server | clientY
# --------->
# <---------
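    # Illustrative exchange for this scenario (message shapes inferred from the
    # handlers below; the ids and req_id values are made up for the example):
    #   clientX -> server: {"request": "get_all_entities", "req_id": "1"}
    #   server -> clientX: {"result": [{"id": "lamp1", "type": "light"}], "req_id": "1"}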
#Get all entities in the system
def get_all_entities(self, message):
temp=[]
for elem in clients.keys():
tempdict={}
tempdict['id']=clients[elem].id
tempdict['type']=clients[elem].type
temp.append(tempdict)
self.write_message('{"result":'+json.dumps(temp)+', "req_id":"'+message["req_id"]+'"}')
#Get specific entity by id
def get_entity(self, message):
try:
cli=clients[message["params"][0]]
tempdict={}
tempdict['id']=cli.id
tempdict['type']=cli.type
tempdict['observers']=cli.observers
tempdict['methods']=cli.methods
self.write_message('{"result":'+json.dumps(tempdict)+', "req_id":"'+message["req_id"]+'"}')
except KeyError:
self.write_message('{"error":"Requested client does not exist."}')
#Get entities by type
def get_entities_by_type(self, message):
tempList=[]
try:
for elem in clients.keys():
if clients[elem].type == message["params"][0]:
tempList.append(clients[elem].id)
typeIds={"type":message["params"][0],"idlist":tempList}
self.write_message('{"result":"'+json.dumps(typeIds)+'", "req_id":"'+message["req_id"]+'"}' )
except:
self.write_message('{"error":"Wrong parameters."}')
#Get all types
def get_all_types(self, message):
tempList=[]
for elem in clients.keys():
tempList.append(clients[elem].type)
self.write_message('{"result":"'+json.dumps(sorted(set(tempList)))+'", "req_id":"'+message["req_id"]+'"}')
#Scenario 1 transactions
#clientX | server | clientY
# --------->
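    # Illustrative one-way request (shape inferred from the handlers below; the
    # publisher id "lamp1" is made up for the example):
    #   clientX -> server: {"request": "subscribe_to", "params": ["lamp1"]}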
#Subscribe to be notified when new entities connect
def subscribe_to_registrations(self, message):
self.subscribeToRegistrations=True
#Unsubscribe from being notified when new entities connect
def unsubscribe_from_registrations(self, message):
self.subscribeToRegistrations=False
#An entity can register all of it"s rpc methods
def register_methods(self, message):
try:
self.methods=message["params"]
except:
self.write_message('{"error":"No arguments supplied"}')
#Subscribe to a publisher (id)
def subscribe_to(self, message):
try:
clients[message["params"][0]].observers.append(self.id)
except:
self.write_message('{"error":"Observer does not exist"}')
#Unsubscribe from a publisher (id)
def unsubscribe_from(self, message):
try:
clients[message].observers.remove(self.id)
print self.id+" has successfully been unsubscribed from "+message
except KeyError:
self.write_message('{"error":"Observer does not exist"}')
except IndexError:
self.write_message('{"error":"The client is not observing this observer"}')
#Scenario 4 transactions
#clientX | server | clientY
# --------->
# --------->
# <---------
# <---------
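    # Illustrative RPC round trip (only "request", "receiver", "req_id" and "content"
    # are handled by this server; any method/parameter fields are forwarded unchanged,
    # and the values below are made up for the example):
    #   clientX -> server -> clientY: {"request": "call_method", "receiver": "lamp1",
    #                                  "method": "toggle", "req_id": "2"}
    #   (the server rewrites "receiver" to clientX's id before forwarding)
    #   clientY -> server -> clientX: {"request": "result", "receiver": "<clientX id>",
    #                                  "content": "ok", "req_id": "2"}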
#Call a remote procedure
def call_method(self, message):
receiver=message["receiver"]
message["receiver"]=self.id
clients[receiver].write_message(json.dumps(message))
def result(self, message):
clients[message["receiver"]].write_message('{"result":'+json.dumps(message["content"])+',"req_id":"'+message["req_id"]+'"}')
#Scenario 3 transactions
#clientX | server | clientY
# --------->
# --------->
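    # Illustrative publish (shape taken from the handler below; the content value is
    # made up for the example):
    #   clientX -> server: {"request": "publish", "content": {"temp": 21}}
    #   server -> each observer: {"request": "publish", "content": {"temp": 21}, "sender": "<clientX id>"}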
#Publish content
def publish(self, message):
for elem in self.observers:
clients[elem].write_message('{"request":"publish","content":'+json.dumps(message["content"])+',"sender":"'+self.id+'"}')
application = tornado.web.Application([(r"/ws", WSHandler)])
if __name__ == '__main__':
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start() | alexandrainst/arip | server/genericplatform.py | Python | mit | 8,159 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Bert Vermeulen <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
Extended Display Identification Data (EDID) 1.3 structure decoder.
The three-character vendor ID as specified in the EDID standard refers to
a Plug and Play ID (PNPID). The list of PNPID assignments is done by Microsoft.
The 'pnpids.txt' file included with this protocol decoder is derived from
the list of assignments downloadable from that page. It was retrieved in
January 2012.
Details:
https://en.wikipedia.org/wiki/Extended_display_identification_data
http://msdn.microsoft.com/en-us/windows/hardware/gg463195
'''
from .pd import Decoder
| DreamSourceLab/DSView | libsigrokdecode4DSL/decoders/edid/__init__.py | Python | gpl-3.0 | 1,316 |
import urllib.request as urllib
from bs4 import BeautifulSoup
def get_html(adress):
""" Gets HTML from the adress provided
and returns it as a soup """
    response = urllib.urlopen(adress) # add exception handling to this
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
return(soup)
def get_text(tags):
    """ Returns all the text contained in
    the tags provided """
lines = []
for tag in tags:
line = tag.get_text()
if line != '':
line = line.strip('\n')
lines.append(line)
return(lines)
def add_elem(array, elem):
""" Adds an element to a list if non empty
used when searches could return blank feilds"""
if elem != '':
array.append(elem)
return(array)
def clean(text):
""" Removes all the special characters from a string"""
special = "\n\t"
# remove while space and special characters at the begining
# and end of the string
text = text.strip()
new_text = ""
special_chars = False
# remove special characters in the interiour of the string and replace
# with a space
for letter in text:
letter_is_special = letter in special
if letter_is_special and not special_chars:
new_text += " "
special_chars = True
elif not letter_is_special:
new_text += letter
special_chars = False
return new_text
def tag_submissions(sub_list, tag_name, tag_keys):
""" Reads through a list of submissions and adds the tag 'tag_name'
    if any of the strings in 'tag_keys' are found in the name or
other information. Case insensitive """
tag_name = tag_name.lower()
for sub in sub_list:
sub_name = sub[0].lower()
extra_info = sub[3].lower()
for key in tag_keys:
key = key.lower()
if (key in sub_name or key in extra_info):
sub[4].append(tag_name)
break
return sub_list
def check_size(sub):
""" Checks that the size of the strings in the submission is not to big
for the models in Django """
name_size = 200
city_size = 50
link_size = 250
tag_size = 50
fits = True
fits *= name_size >= len(sub[0])
fits *= link_size >= len(sub[1])
for tag in sub[4]:
fits *= tag_size > len(tag)
if not fits:
print("Submission is too big")
print(sub)
import sys; sys.exit()
| Stinners/Web-Scrapper | Scrapper/utilities.py | Python | gpl-3.0 | 2,491 |
#!/usr/bin/env python2
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys, os, unittest
#Check if config.py exists. Get 'fife_path' from config
try:
import config
sys.path.append(config.fife_path)
except:
pass
def _jp(path):
return os.path.sep.join(path.split('/'))
_paths = ('../../engine/swigwrappers/python', '../../engine/extensions', 'tests', "src")
test_suite = unittest.TestSuite()
for p in _paths:
if p not in sys.path:
sys.path.append(_jp(p))
for p in os.listdir("tests") :
if p[-3:] == ".py" :
test_suite.addTest(unittest.TestLoader().loadTestsFromName(p[:-3]))
unittest.TextTestRunner(verbosity=2).run(test_suite)
| parpg/parpg | run_tests.py | Python | gpl-3.0 | 1,286 |
# -*- coding: utf-8 -*-
# vim: noai:ts=4:sw=4:expandtab
import subprocess
from mockbuild.trace_decorator import getLog, traceLog
from mockbuild import util
class Podman:
""" interacts with podman to create build chroot """
@traceLog()
def __init__(self, buildroot, image):
self.buildroot = buildroot
self.image = image
self.container_id = None
getLog().info("Using bootstrap image: %s", image)
@traceLog()
def pull_image(self):
""" pull the latest image """
getLog().info("Pulling image: %s", self.image)
cmd = ["podman", "pull", self.image]
util.do(cmd, printOutput=True, env=self.buildroot.env)
@traceLog()
def get_container_id(self):
""" start a container and detach immediately """
cmd = ["podman", "run", "--quiet", "-i", "--detach", self.image, "/bin/bash"]
container_id = util.do(cmd, returnOutput=True, env=self.buildroot.env)
self.container_id = container_id.strip()
return self.container_id
@traceLog()
def exec(self, command):
""" exec command in container """
cmd = ["podman", "exec", self.container_id] + command
util.do(cmd, printOutput=True, env=self.buildroot.env)
@traceLog()
def install_pkgmgmt_packages(self):
""" make sure the image contains expected packages """
pmname = self.buildroot.config['package_manager']
binary = self.buildroot.config['{}_command'.format(pmname)]
install_command = self.buildroot.config['{}_install_command'.format(pmname)]
cmd = [binary, '-y']
cmd += install_command.split()
self.exec(cmd)
@traceLog()
def export(self, cache_file_name, compress_program):
""" export container and compress it """
getLog().info("Exporting container: %s as %s", self.image, cache_file_name)
cmd_podman = ["podman", "export", self.container_id]
podman = subprocess.Popen(cmd_podman, stdout=subprocess.PIPE)
cache_file = open(cache_file_name, "w")
cmd_compressor = [compress_program, "--stdout"]
compressor = subprocess.Popen(cmd_compressor, stdin=podman.stdout, stdout=cache_file)
compressor.communicate()
podman.communicate()
cache_file.close()
@traceLog()
def cp(self, destination, tar_cmd):
""" copy content of container to destination directory """
getLog().info("Copy content of container %s to %s", self.image, destination)
cmd_podman = ["podman", "export", self.container_id]
podman = subprocess.Popen(cmd_podman, stdout=subprocess.PIPE)
cmd_tar = [tar_cmd, "-xC", destination, "-f", "-"]
tar = subprocess.Popen(cmd_tar, stdin=podman.stdout)
tar.communicate()
podman.communicate()
@traceLog()
def remove(self):
""" remove the container """
cmd = ["podman", "rm", "-f", self.container_id]
util.do(cmd)
self.container_id = None
def __repr__(self):
return "Podman({}({}))".format(self.image, self.container_id)
| rpm-software-management/mock | mock/py/mockbuild/podman.py | Python | gpl-2.0 | 3,096 |
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import bdot
import numpy as np
class TimeSuite:
"""
    Benchmarks that time bdot matrix/vector dot products at various data sizes.
Production
1. Nearest Neighbor Search
a. medium length 300,000 rows
i. 32 columns
ii. 128 columns
iii. 512 columns
b. big length 1,000,000 rows
i. 32 columns
ii. 128 columns
iii. 512 columns
Analytical
2. Outer Product on Vectors (Correlation Matrix)
a. medium data 51,810 rows
b. pretty big data 366,357 rows
"""
def setup(self):
self.d = {}
for x in range(500):
self.d[x] = None
def time_matrix_2_18_vector_32(self):
matrix = np.random.random_integers(0, 120, size=(2 ** 18, 32))
bcarray = bdot.carray(matrix, chunklen=2**14, cparams=bdot.cparams(clevel=2))
v = bcarray[0]
output = bcarray.empty_like_dot(v)
result = bcarray.dot(v, out=output)
class MemSuite:
def mem_matrix_2_18_vector_32(self):
matrix = np.random.random_integers(0, 120, size=(2 ** 18, 32))
bcarray = bdot.carray(matrix, chunklen=2**14, cparams=bdot.cparams(clevel=2))
v = bcarray[0]
output = bcarray.empty_like_dot(v)
result = bcarray.dot(v, out=output)
return result
| tailwind/bdot | benchmarks/asv/benchmarks.py | Python | mit | 1,523 |
# -*- coding: utf-8 -*-
# Roastero, released under GPLv3
import json
import openroast
from multiprocessing import sharedctypes, Array
import ctypes
import freshroastsr700
class Recipe(object):
def __init__(self, roaster, app, max_recipe_size_bytes=64*1024):
# this object is accessed by multiple processes, in part because
# freshroastsr700 calls Recipe.move_to_next_section() from a
# child process. Therefore, all data handling must be process-safe.
# recipe step currently being applied
self.currentRecipeStep = sharedctypes.Value('i', 0)
# Stores recipe
# Here, we need to use shared memory to store the recipe.
# Tried multiprocessing.Manager, wasn't very successful with that,
# resorting to allocating a fixed-size, large buffer to store a JSON
# string. This Array needs to live for the lifetime of the object.
self.recipe_str = Array(ctypes.c_char, max_recipe_size_bytes)
# Tells if a recipe has been loaded
self.recipeLoaded = sharedctypes.Value('i', 0) # boolean
# we are not storing this object in a process-safe manner,
# but its members are process-safe (make sure you only use
# its process-safe members from here!)
self.roaster=roaster
self.app = app
def _recipe(self):
# retrieve the recipe as a JSON string in shared memory.
# needed to allow freshroastsr700 to access Recipe from
# its child process
if self.recipeLoaded.value:
return json.loads(self.recipe_str.value.decode('utf_8'))
else:
return {}
def load_recipe_json(self, recipeJson):
# recipeJson is actually a dict...
self.recipe_str.value = json.dumps(recipeJson).encode('utf_8')
self.recipeLoaded.value = 1
def load_recipe_file(self, recipeFile):
# Load recipe file
recipeFileHandler = open(recipeFile)
recipe_dict = json.load(recipeFileHandler)
recipeFileHandler.close()
self.load_recipe_json(recipe_dict)
def clear_recipe(self):
self.recipeLoaded.value = 0
self.recipe_str.value = ''.encode('utf_8')
self.currentRecipeStep.value = 0
def check_recipe_loaded(self):
return self.recipeLoaded.value != 0
def get_num_recipe_sections(self):
if not self.check_recipe_loaded():
return 0
return len(self._recipe()["steps"])
def get_current_step_number(self):
return self.currentRecipeStep.value
def get_current_fan_speed(self):
crnt_step = self.currentRecipeStep.value
return self._recipe()["steps"][crnt_step]["fanSpeed"]
def get_current_target_temp(self):
crnt_step = self.currentRecipeStep.value
if(self._recipe()["steps"][crnt_step].get("targetTemp")):
return self._recipe()["steps"][crnt_step]["targetTemp"]
else:
return 150
def get_current_section_time(self):
crnt_step = self.currentRecipeStep.value
return self._recipe()["steps"][crnt_step]["sectionTime"]
def restart_current_recipe(self):
self.currentRecipeStep.value = 0
self.load_current_section()
def more_recipe_sections(self):
if not self.check_recipe_loaded():
return False
if(len(self._recipe()["steps"]) - self.currentRecipeStep.value == 0):
return False
else:
return True
def get_current_cooling_status(self):
crnt_step = self.currentRecipeStep.value
if(self._recipe()["steps"][crnt_step].get("cooling")):
return self._recipe()["steps"][crnt_step]["cooling"]
else:
return False
def get_section_time(self, index):
return self._recipe()["steps"][index]["sectionTime"]
def get_section_temp(self, index):
if(self._recipe()["steps"][index].get("targetTemp")):
return self._recipe()["steps"][index]["targetTemp"]
else:
return 150
def reset_roaster_settings(self):
self.roaster.target_temp = 150
self.roaster.fan_speed = 1
self.roaster.time_remaining = 0
def set_roaster_settings(self, targetTemp, fanSpeed, sectionTime, cooling):
if cooling:
self.roaster.cool()
# Prevent the roaster from starting when section time = 0 (ex clear)
if(not cooling and sectionTime > 0 and
self.currentRecipeStep.value > 0):
self.roaster.roast()
self.roaster.target_temp = targetTemp
self.roaster.fan_speed = fanSpeed
self.roaster.time_remaining = sectionTime
def load_current_section(self):
self.set_roaster_settings(self.get_current_target_temp(),
self.get_current_fan_speed(),
self.get_current_section_time(),
self.get_current_cooling_status())
def move_to_next_section(self):
# this gets called from freshroastsr700's timer process, which
# is spawned using multiprocessing. Therefore, all things
# accessed in this function must be process-safe!
if self.check_recipe_loaded():
if(
(self.currentRecipeStep.value + 1) >=
self.get_num_recipe_sections()):
self.roaster.idle()
else:
self.currentRecipeStep.value += 1
self.load_current_section()
# call back into RoastTab window
self.app.roasttab_flag_update_controllers()
else:
self.roaster.idle()
def get_current_recipe(self):
return self._recipe()
| Roastero/Openroast | openroast/controllers/recipe.py | Python | gpl-3.0 | 5,733 |