repo_name stringlengths 5-92 | path stringlengths 4-232 | copies stringclasses 19 values | size stringlengths 4-7 | content stringlengths 721-1.04M | license stringclasses 15 values | hash int64 -9,223,277,421,539,062,000 to 9,223,102,107B | line_mean float64 6.51-99.9 | line_max int64 15-997 | alpha_frac float64 0.25-0.97 | autogenerated bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
RENCI/xDCIShare | hs_metrics/views.py | 1 | 5571 | from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from django.contrib.auth.models import User
from mezzanine.generic.models import Rating, ThreadedComment
from theme.models import UserProfile # fixme switch to party model
from hs_core import hydroshare
from collections import Counter
class xDCIShareSiteMetrics(TemplateView):
template_name = 'hs_metrics/hydrosharesitemetrics.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(xDCIShareSiteMetrics, self).dispatch(request, *args, **kwargs)
def __init__(self, **kwargs):
super(xDCIShareSiteMetrics, self).__init__(**kwargs)
self.n_registered_users = User.objects.all().count()
self.n_host_institutions = 0
self.host_institutions = set()
self.n_users_logged_on = None # fixme need to track
self.max_logon_duration = None # fixme need to track
self.n_courses = 0
self.n_agencies = 0
self.agencies = set()
        self.n_core_contributors = 6 # fixme need to track (use GitHub API Key) https://api.github.com/teams/328946
self.n_extension_contributors = 10 # fixme need to track (use GitHub API Key) https://api.github.com/teams/964835
self.n_citations = 0 # fixme hard to quantify
self.resource_type_counts = Counter()
self.user_titles = Counter()
self.user_professions = Counter()
self.user_subject_areas = Counter()
self.n_ratings = 0
self.n_comments = 0
self.n_resources = 0
def get_context_data(self, **kwargs):
"""
1. Number of registered users (with voluntarily supplied demography and diversity)
2. Number of host institutions (with demography).
3. Use statistics (for each month number and average log-on duration, maximum number of users logged on, total
CPU hours of model run time by different compute resources).
4. Number of courses and students using educational material (with demography and diversity based on user
information).
5. Number of ratings and comments about resources.
6. The quantity of hydrological data including data values, sites, and variables, and web service data requests
per day.
7. The number of non-CUAHSI agencies that utilize xDCIShare (e.g. NCDC).
8. The number of contributors to the core infrastructure code base.
9. The number of contributors to non-core code that is part of the system, such as clients or apps and other
software projects where changes are made to adapt for xDCIShare
10. The number of downloads of releases of clients and apps.
11. The number of users trained during the various outreach activities.
12. Number of papers submitted to and published in peer reviewed forums about this project or using the
infrastructure of this project. To the extent possible these will be stratified demographically and based
on whether they report contributions that are domain research or cyberinfrastructure. We will also measure
posters, invited talks, panel sessions, etc. We will also track citations generated by these papers.
13. Number of citations of various xDCIShare resources.
14. The types and amounts of resources stored within the system, and their associated downloads (resource types
will include data of varying type, model codes, scripts, workflows and documents).
:param kwargs:
:return:
"""
ctx = super(xDCIShareSiteMetrics, self).get_context_data(**kwargs)
self.get_resource_stats()
self.get_user_stats()
self.user_professions = self.user_professions.items()
self.user_subject_areas = self.user_subject_areas.items()
self.resource_type_counts = self.resource_type_counts.items()
self.user_titles = self.user_titles.items()
ctx['metrics'] = self
return ctx
def get_all_resources(self):
"""Yield all resources in the system as a single generator"""
resource_types = hydroshare.get_resource_types()
for qs in (res_model.objects.all() for res_model in resource_types):
for resource in qs:
yield resource
def get_resource_stats(self):
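        # Count every resource by its verbose type name, then pull site-wide rating and
        # comment totals from Mezzanine's Rating and ThreadedComment tables.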
for resource in self.get_all_resources():
resource_type_name = resource._meta.verbose_name if hasattr(resource._meta, 'verbose_name') else resource._meta.model_name
self.resource_type_counts[resource_type_name] += 1
self.n_resources += 1
self.n_ratings = Rating.objects.all().count()
self.n_comments = ThreadedComment.objects.all().count()
def get_user_stats(self):
# FIXME revisit this with the hs_party application
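        # Each profile's organization is counted as an agency (government/commercial) or a host
        # institution; titles, professions and comma-separated subject areas are tallied as well.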
for profile in UserProfile.objects.all():
if profile.organization_type in ('Government','Commercial'):
self.agencies.add(profile.organization)
else:
self.host_institutions.add(profile.organization)
self.user_professions[profile.profession] += 1
self.user_titles[profile.title] += 1
if profile.subject_areas:
self.user_subject_areas.update(a.strip() for a in profile.subject_areas.split(','))
self.n_host_institutions = len(self.host_institutions)
self.n_agencies = len(self.agencies)
| bsd-3-clause | -434,498,257,057,787,260 | 49.189189 | 134 | 0.675821 | false |
willemarcel/pontocerto | pontocerto/core/views.py | 1 | 1215 | from rest_framework.generics import ListAPIView
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import Point
from django.forms import Form, CharField, FloatField
from django.http import HttpResponseRedirect
from django.views.generic.edit import FormView
from .models import Ponto
from .serializers import PontoSerializer
class GeojsonPontoList(ListAPIView):
queryset = Ponto.objects.all()
serializer_class = PontoSerializer
class PointForm(Form):
nome = CharField(max_length=100, required=False)
lat = FloatField(label="Latitude")
lon = FloatField(label="Longitude")
class CreatePointView(FormView):
template_name = 'core/create_point.html'
form_class = PointForm
def form_valid(self, form):
ponto = Ponto.objects.create(
nome=form.cleaned_data.get('nome'),
location=Point(
form.cleaned_data.get('lon'),
form.cleaned_data.get('lat')
)
)
url = reverse(
'admin:{0}_{1}_change'.format(
ponto._meta.app_label, ponto._meta.model_name
), args=(ponto.pk,)
)
return HttpResponseRedirect(url)
| agpl-3.0 | -8,883,890,015,690,834,000 | 28.634146 | 61 | 0.655144 | false |
fernandog/Medusa | medusa/server/web/home/post_process.py | 1 | 1966 | # coding=utf-8
from __future__ import unicode_literals
from medusa import process_tv
from medusa.helper.encoding import ss
from medusa.server.web.core import PageTemplate
from medusa.server.web.home.handler import Home
from six import string_types
from tornroutes import route
@route('/home/postprocess(/?.*)')
class HomePostProcess(Home):
def __init__(self, *args, **kwargs):
super(HomePostProcess, self).__init__(*args, **kwargs)
def index(self):
t = PageTemplate(rh=self, filename='home_postprocess.mako')
return t.render(topmenu='home', controller='home', action='postProcess')
def processEpisode(self, proc_dir=None, nzbName=None, jobName=None, quiet=None, process_method=None, force=None,
is_priority=None, delete_on='0', failed='0', proc_type='auto', ignore_subs=None, *args, **kwargs):
def argToBool(argument):
if isinstance(argument, string_types):
_arg = argument.strip().lower()
else:
_arg = argument
if _arg in ['1', 'on', 'true', True]:
return True
elif _arg in ['0', 'off', 'false', False]:
return False
return argument
if not proc_dir:
return self.redirect('/home/postprocess/')
else:
resource_name = ss(nzbName) if nzbName else None
result = process_tv.ProcessResult(ss(proc_dir), process_method=process_method).process(
resource_name=resource_name, force=argToBool(force), is_priority=argToBool(is_priority),
                delete_on=argToBool(delete_on), failed=argToBool(failed), proc_type=proc_type,
ignore_subs=argToBool(ignore_subs)
)
if quiet is not None and int(quiet) == 1:
return result
result = result.replace('\n', '<br>\n')
return self._genericMessage('Postprocessing results', result)
| gpl-3.0 | -5,933,431,452,205,526,000 | 34.107143 | 121 | 0.606307 | false |
CalthorpeAnalytics/urbanfootprint | footprint/main/management/commands/clear_future_built_forms.py | 1 | 1703 |
# UrbanFootprint v1.5
# Copyright (C) 2017 Calthorpe Analytics
#
# This file is part of UrbanFootprint version 1.5
#
# UrbanFootprint is distributed under the terms of the GNU General
# Public License version 3, as published by the Free Software Foundation. This
# code is distributed WITHOUT ANY WARRANTY, without implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License v3 for more details; see <http://www.gnu.org/licenses/>.
from optparse import make_option
import logging
from django.core.management.base import BaseCommand
from footprint.main.models.config.scenario import FutureScenario
from footprint.main.models.keys.keys import Keys
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
This command clears all layer_selections
"""
option_list = BaseCommand.option_list + (
make_option('-r', '--resave', action='store_true', default=False,
help='Resave all the config_entities to trigger signals'),
make_option('--scenario', default='', help='String matching a key of or more Scenario to run'),
)
def handle(self, *args, **options):
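        # For every selected FutureScenario, fetch its future-scenario feature table and
        # clear the built_form reference on any feature that still has one assigned.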
scenarios = FutureScenario.objects.filter(key__contains=options['scenario']) if options[
'scenario'] else FutureScenario.objects.all()
for scenario in scenarios:
future_scenario_feature_class = scenario.db_entity_feature_class(DbEntityKey.FUTURE_SCENARIO)
for future_scenario_feature in future_scenario_feature_class.objects.exclude(built_form__isnull=True):
future_scenario_feature.built_form = None
future_scenario_feature.save()
| gpl-3.0 | -1,502,007,972,385,661,000 | 39.547619 | 114 | 0.711685 | false |
frostasm/qt-creator | tests/system/suite_editors/tst_rename_macros/test.py | 1 | 7824 | #############################################################################
##
## Copyright (C) 2015 The Qt Company Ltd.
## Contact: http://www.qt.io/licensing
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms and
## conditions see http://www.qt.io/terms-conditions. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, The Qt Company gives you certain additional
## rights. These rights are described in The Qt Company LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
cppEditorStr = ":Qt Creator_CppEditor::Internal::CPPEditorWidget"
def main():
global cppEditorStr
folder = prepareTemplate(os.path.abspath(os.path.join(os.getcwd(), "..", "shared",
"simplePlainCPP")))
if folder == None:
test.fatal("Could not prepare test files - leaving test")
return
proFile = os.path.join(folder, "testfiles.pro")
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
openQmakeProject(proFile)
if not testRenameMacroAfterSourceModification():
return
headerName = "anothertestfile.h"
addCPlusPlusFileToCurrentProject(headerName, "C++ Header File",
expectedHeaderName=headerName)
if not testRenameMacroAfterSourceMoving():
return
invokeMenuItem("File", "Save All")
invokeMenuItem("File", "Exit")
def testRenameMacroAfterSourceModification():
def __deleteAnyClass__():
global cppEditorStr
if platform.system() == 'Darwin':
type(cppEditorStr, "<Meta+Left>")
else:
type(cppEditorStr, "<Home>")
markText(cppEditorStr, "Down", 5)
type(cppEditorStr, "<Delete>")
test.log("Testing rename macro after modifying source.")
formerTexts = {}
content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
"class AnyClass", __deleteAnyClass__)
if not content:
return False
formerTexts["testfiles.Headers.testfile\\.h"] = content
content = openDocumentPlaceCursor("testfiles.Sources.testfile\\.cpp", "SOME_MACRO_NAME(a)")
if not content:
return False
formerTexts["testfiles.Sources.testfile\\.cpp"] = content
performMacroRenaming('SOME_OTHER_MACRO_NAME')
verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "SOME_OTHER_MACRO_NAME")
revertChanges(formerTexts)
return True
def testRenameMacroAfterSourceMoving():
def __cut__():
global cppEditorStr
if platform.system() == 'Darwin':
type(cppEditorStr, "<Meta+Left>")
else:
type(cppEditorStr, "<Home>")
markText(cppEditorStr, "Down", 4)
invokeMenuItem("Edit", "Cut")
def __paste__():
global cppEditorStr
type(cppEditorStr, "<Return>")
invokeMenuItem("Edit", "Paste")
def __insertInclude__():
global cppEditorStr
typeLines(cppEditorStr, ['', '#include "anothertestfile.h"'])
test.log("Testing rename macro after moving source.")
formerTexts = {}
content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
"#define SOME_MACRO_NAME( X )\\", __cut__)
if not content:
return False
formerTexts["testfiles.Headers.testfile\\.h"] = content
content = openDocumentPlaceCursor("testfiles.Headers.anothertestfile\\.h",
"#define ANOTHERTESTFILE_H", __paste__)
if not content:
return False
formerTexts["testfiles.Headers.anothertestfile\\.h"] = content
content = openDocumentPlaceCursor('testfiles.Sources.testfile\\.cpp',
'#include "testfile.h"', __insertInclude__)
if not content:
return False
formerTexts["testfiles.Sources.testfile\\.cpp"] = content
placeCursorToLine(cppEditorStr, "SOME_MACRO_NAME(a)")
performMacroRenaming("COMPLETELY_DIFFERENT_MACRO_NAME")
verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "COMPLETELY_DIFFERENT_MACRO_NAME")
revertChanges(formerTexts)
return True
def performMacroRenaming(newMacroName):
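    # Step the cursor back into the macro name, trigger "Rename Symbol Under Cursor",
    # then replace both expected occurrences with the new macro name.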
for i in range(10):
type(cppEditorStr, "<Left>")
invokeContextMenuItem(waitForObject(cppEditorStr), "Refactor",
"Rename Symbol Under Cursor")
waitForSearchResults()
validateSearchResult(2)
replaceLineEdit = waitForObject("{leftWidget={text='Replace with:' type='QLabel' "
"unnamed='1' visible='1'} "
"type='Core::Internal::WideEnoughLineEdit' unnamed='1' "
"visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}")
replaceEditorContent(replaceLineEdit, newMacroName)
clickButton(waitForObject("{text='Replace' type='QToolButton' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}"))
def verifyChangedContent(origTexts, replacedSymbol, replacement):
global cppEditorStr
successfullyCompared = []
for fileName,text in origTexts.iteritems():
if openDocument(fileName):
successfullyCompared.append(test.compare(waitForObject(cppEditorStr).plainText,
text.replace(replacedSymbol, replacement),
"Verifying content of %s" %
simpleFileName(fileName)))
else:
successfullyCompared.append(False)
test.fail("Failed to open document %s" % simpleFileName(fileName))
if successfullyCompared.count(True) == len(origTexts):
test.passes("Successfully compared %d changed files" % len(origTexts))
else:
test.fail("Verified %d files - %d have been successfully changed and %d failed to "
"change correctly." % (len(origTexts), successfullyCompared.count(True),
successfullyCompared.count(False)))
def revertChanges(files):
for f in files:
simpleName = simpleFileName(f)
if openDocument(f):
try:
invokeMenuItem('File', 'Revert "%s" to Saved' % simpleName)
clickButton(waitForObject(":Revert to Saved.Proceed_QPushButton"))
test.log("Reverted changes inside %s" % simpleName)
except:
test.warning("File '%s' cannot be reverted." % simpleName,
"Maybe it has not been changed at all.")
else:
test.fail("Could not open %s for reverting changes" % simpleName)
| gpl-3.0 | -6,147,371,400,253,073,000 | 44.488372 | 95 | 0.617587 | false |
viapath/zippy | zippy/zippylib/interval.py | 1 | 4651 | #!/usr/bin/env python
__doc__ = """Interval Lists"""
__author__ = "David Brawand"
__license__ = "MIT"
__version__ = "2.3.4"
__maintainer__ = "David Brawand"
__email__ = "[email protected]"
__status__ = "Production"
import sys
from math import ceil
class Interval(object):
def __init__(self,chrom,chromStart,chromEnd,name=None,reverse=None,sample=None):
self.chrom = chrom
self.chromStart = int(chromStart)
self.chromEnd = int(chromEnd)
assert self.chromStart <= self.chromEnd # make sure its on the forward genomic strand
self.name = name if name else chrom+':'+str(chromStart)+'-'+str(chromEnd)
self.strand = 0 if reverse is None else -1 if reverse else 1
self.sample = sample
self.subintervals = IntervalList([])
return
def midpoint(self):
return int(self.chromStart + (self.chromEnd - self.chromStart)/2.0)
def locus(self):
'''returns interval of variant'''
return ( self.chrom, self.chromStart, self.chromEnd )
def __hash__(self):
return hash(str(self))
def __len__(self):
return self.chromEnd - self.chromStart
def __eq__(self,other):
return hash(self) == hash(other)
def __lt__(self,other):
return (self.chrom, self.chromStart, self.chromEnd) < (other.chrom, other.chromStart, other.chromEnd)
def __repr__(self):
return "<Interval ("+self.name+") "+self.chrom+":"+str(self.chromStart)+'-'+str(self.chromEnd)+ \
" ["+str(self.strand)+"] len:"+str(len(self))+">"
def __str__(self):
return "\t".join(map(str,[self.chrom, self.chromStart, self.chromEnd, self.name]))
def tile(self,i,o,suffix=True): # interval, overlap
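        # i is the requested tile length and o the overlap between neighbouring tiles; the
        # interval is covered with the fewest tiles possible, resized to share length evenly.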
splitintervals = int(ceil( (len(self)-o) / float(i-o) )) # interval number
optimalsize = int(ceil( (len(self) + splitintervals*o - o) / float(splitintervals) )) # optimal interval size
# get tile spans (and number of exons)
tilespan = []
for n,tilestart in enumerate(range(self.chromStart, self.chromEnd, optimalsize-o)):
tileend = min(tilestart+optimalsize, self.chromEnd)
tilespan.append((tilestart,tileend))
if tileend == self.chromEnd:
break
tiles = []
for n,t in enumerate(tilespan):
tilenumber = len(tilespan)-n if self.strand < 0 else n+1
tiles.append(Interval(self.chrom,t[0],t[1],self.name+'_'+str(tilenumber) if suffix else None, self.strand < 0))
return tiles
def extend(self,flank):
self.chromStart = self.chromStart-flank if flank <= self.chromStart else 0
self.chromEnd = self.chromEnd+flank
return self
    def overlap(self,other): # also returns True for bookended intervals
return self.chrom == other.chrom and \
not (other.chromEnd < self.chromStart or other.chromStart > self.chromEnd)
def merge(self,other,subintervals=False):
if self.chrom == other.chrom and self.strand == other.strand:
self.chromStart = other.chromStart if other.chromStart < self.chromStart else self.chromStart
self.chromEnd = other.chromEnd if other.chromEnd > self.chromEnd else self.chromEnd
self.name = self.name if other.name == self.name else self.name + '_' + other.name
if subintervals and (self.subintervals or other.subintervals):
self.subintervals += other.subintervals
self.flattenSubintervals()
def addSubintervals(self,add):
for e in add:
if e.chromStart < self.chromStart:
self.chromStart = e.chromStart
if e.chromEnd > self.chromEnd:
self.chromEnd = e.chromEnd
self.subintervals.append(e)
self.subintervals.sort()
def flattenSubintervals(self):
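        # Sort the subintervals and merge any overlapping (or bookended) ones in place.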
if self.subintervals:
self.subintervals.sort()
merged = [ self.subintervals[0] ]
for i in range(1,len(self.subintervals)):
if merged[-1].overlap(self.subintervals[i]):
merged[-1].merge(self.subintervals[i])
else:
merged.append(self.subintervals[i])
self.subintervals = IntervalList(merged)
'''list of intervals'''
class IntervalList(list):
def __init__(self,elements,source=None):
list.__init__(self, elements)
self.source = source # source of intervals
def __str__(self):
return "<IntervalList (%s) %d elements> " % (self.source, len(self))
def __repr__(self):
return "<IntervalList (%s) %d elements> " % (self.source, len(self))
| mit | -7,593,002,557,638,501,000 | 38.752137 | 123 | 0.609546 | false |
EmreAtes/spack | var/spack/repos/builtin/packages/intltool/package.py | 1 | 2529 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Intltool(AutotoolsPackage):
"""intltool is a set of tools to centralize translation of many different
file formats using GNU gettext-compatible PO files."""
homepage = 'https://freedesktop.org/wiki/Software/intltool/'
url = 'https://launchpad.net/intltool/trunk/0.51.0/+download/intltool-0.51.0.tar.gz'
list_url = 'https://launchpad.net/intltool/+download'
version('0.51.0', '12e517cac2b57a0121cda351570f1e63')
# requires XML::Parser perl module
depends_on('perl-xml-parser', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
def check(self):
# `make check` passes but causes `make install` to fail
pass
def _make_executable(self, name):
return Executable(join_path(self.prefix.bin, name))
def setup_dependent_package(self, module, dependent_spec):
# intltool is very likely to be a build dependency,
# so we add the tools it provides to the dependent module
executables = [
'intltool-extract',
'intltoolize',
'intltool-merge',
'intltool-prepare',
'intltool-update'
]
for name in executables:
setattr(module, name, self._make_executable(name))
| lgpl-2.1 | 8,018,918,679,636,101,000 | 40.459016 | 93 | 0.652432 | false |
OpenSecurityResearch/clipcaptcha | clipcaptcha.py | 1 | 7438 | #!/usr/bin/env python
"""clipcaptcha is a CAPTCHA Provider masquerading tool based off Moxie Marlinspike's SSLStrip codebase"""
# Copyright (c) 2012 Gursev Singh Kalra @ McAfee Foundstone
# Copyright (c) 2004-2011 Moxie Marlinspike
__author__ = "Gursev Singh Kalra"
__email__ = "[email protected]"
__license__= """
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
"""
from twisted.web import http
from twisted.internet import reactor
from clipcaptcha.StrippingProxy import StrippingProxy
from clipcaptcha.URLMonitor import URLMonitor
from clipcaptcha.ProviderInfo import ProviderInfo
from clipcaptcha.Mode import Mode
from clipcaptcha.ClientRequest import ClientRequest
import sys, getopt, logging, traceback, string, os
gVersion = "0.1"
def usage():
print "=>> clipcaptcha " + gVersion + " by Gursev Singh Kalra"
print "Usage: clipcaptcha <mode> <options>"
print "Modes(choose one):"
print "\t-m , --monitor Listen and log. No changes made (default)"
print "\t-a , --avalanche Return success for all CAPTCHA validations"
print "\t-s <secret> , --stealth <secret> Stealth mode with secret string to approve our own submissions"
print "\t-d , --dos Return failure for all CAPTCHA validations"
print "\t-r , --random Return random success or failures for CAPTCHA validations"
print "Options:"
print "\t-c <filename> , --config=<filename> clipcaptcha Config file with CAPTCHA provider signatures (optional)"
print "\t-p <port> , --port=<port> Port to listen on (default 7777)."
print "\t-f <filename> , --file=<filename> Specify file to log to (default clipcaptcha.log)."
print "\t-l , --list List CAPTCHA providers available"
print "\t-h , --help Print this help message."
print ""
# print "-p , --post Log only SSL POSTs. (default)"
# print "-s , --ssl Log all SSL traffic to and from server."
# print "-a , --all Log all SSL and HTTP traffic to and from server."
# print "-k , --killsessions Kill sessions in progress."
def pickProvidersToClip():
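	# Ask the user to pick CAPTCHA providers by index; an empty answer clips all of them.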
providers = ProviderInfo.getProviders()
pIndexes = []
providersToClip = []
print "[+] Available CAPTCHA Providers =>"
i = 0
for provider in providers:
print "\t" + str(i) + ": \t" + provider.name
i = i+1
indexes = raw_input("[?] Choose CAPTCHA Providers by typing space separated indexes below or press enter to clip all : ")
indexes = indexes.split()
for i in indexes:
try:
idx = int(i)
if(idx > len(providers) - 1 or idx < 0):
print "[-] Indexes must represent a valid CAPTCHA provider. Exiting!"
sys.exit()
pIndexes.append(idx)
except ValueError:
print "[-] Indexes must be integers. Exiting!"
sys.exit()
pIndexes = list(set(pIndexes))
pIndexes.sort()
for i in pIndexes:
providersToClip.append(providers[i])
if(len(providersToClip) == 0):
return providers
return providersToClip
def parseOptions(argv):
modes = 0
logFile = 'clipcaptcha.log'
logLevel = logging.WARNING
listenPort = 7777
killSessions = False
operationMode = Mode.MONITOR
providersToClip = []
runningMode = ""
secretString = None
listProviders = False
configFile = "config.xml"
try:
#Parse the arguments and store the options in opts. args basically gets ignored.
# the ':' indicates that the option expects an argument to be passed.
opts, args = getopt.getopt(argv, "s:amdrf:p:lhc:", ["secret=", "monitor", "avalanche", "dos", "random", "file=", "port=", "list", "help", "config="])
# go over each option, store individual options in opt and arg. Then go through the ifelse structure and initialize various options.
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-m", "--monitor"):
operationMode = Mode.MONITOR
runningMode = "Monitor"
modes = modes + 1
elif opt in ("-a", "--avalanche"):
operationMode = Mode.AVALANCHE
runningMode = "Avalanche"
modes = modes + 1
elif opt in ("-s", "--stealth"):
secretString = arg
operationMode = Mode.STEALTH
runningMode = "Stealth"
modes = modes + 1
elif opt in ("-d", "--dos"):
operationMode = Mode.DOS
runningMode = "DoS"
modes = modes + 1
elif opt in ("-r", "--random"):
operationMode = Mode.RANDOM
runningMode = "Random"
modes = modes + 1
elif opt in ("-c", "--config"):
configFile = arg
elif opt in ("-f", "--file"):
logFile = arg
elif opt in ("-p", "--port"):
listenPort = arg
elif opt in ("-l", "--list"):
listProviders = True
# elif opt in ("-p", "--post"):
# logLevel = logging.WARNING
# elif opt in ("-s", "--ssl"):
# logLevel = logging.INFO
# elif opt in ("-a", "--all"):
# logLevel = logging.DEBUG
# elif opt in ("-k", "--killsessions"):
# killSessions = True
if(modes > 1):
print "[-] Choose only one mode."
print ""
usage()
sys.exit()
if(modes < 1):
print "[+] No mode selected. Defaulting to Monitor mode "
runningMode = "Monitor"
ProviderInfo.initProviders(configFile)
if(listProviders == True):
providers = ProviderInfo.getProviders()
print "Available CAPTCHA Providers:"
i = 0
for provider in providers:
print "\n\n######################################################################"
print "\t" + str(i) + ": \t" + provider.name
#print provider.hostname
#print provider.path
#print "============================ success =="
#print provider.sCode
#print provider.sCodeStr
#print provider.sHeaders
#print provider.sBody
#print "============================ failure =="
#print provider.fCode
#print provider.fCodeStr
#print provider.fHeaders
#print provider.fBody
i = i+1
sys.exit()
providersToClip = pickProvidersToClip()
clippedNames = []
for p in providersToClip:
clippedNames.append(p.name)
clipped = ", ".join(clippedNames)
print "[+] Cool, I am clipping these CAPTHA providers => "+ clipped
print "[+] Running in " + runningMode + " mode"
#return all options
return (logFile, logLevel, listenPort, killSessions, secretString, operationMode, providersToClip)
#Catch the exception, show usage and exit
except getopt.GetoptError:
usage()
sys.exit(2)
def main(argv):
(logFile, logLevel, listenPort, killSessions, secretString, operationMode, providersToClip) = parseOptions(argv)
logging.basicConfig(level=logLevel, format='%(asctime)s %(message)s', filename=logFile, filemode='w')
ClientRequest.setProvidersToClip(providersToClip)
ClientRequest.setOperationModeAndSecret(operationMode, secretString)
strippingFactory = http.HTTPFactory(timeout=10)
strippingFactory.protocol = StrippingProxy
reactor.listenTCP(int(listenPort), strippingFactory)
reactor.run()
if __name__ == '__main__':
main(sys.argv[1:])
| gpl-3.0 | -8,304,832,178,360,392,000 | 32.504505 | 151 | 0.670745 | false |
wallarelvo/CRoPS | code/mapparser.py | 1 | 3275 | #!/usr/bin/env python
import obstacle
import configuration as con
def mapVal(x, in_min, in_max, out_min, out_max):
"""
Maps a value that is between in_min and in_max to
a value between out_min and out_max
@param in_min The minimum value that the input value could be
@param in_max The maximum value that the input value could be
@param out_min The minimum value that the output value could be
@param out_max The maximum value that the output value could be
@return A scaled value based on a given input
"""
return int(
(x - in_min) *
(out_max - out_min) /
(in_max - in_min) +
out_min
)
def mparse(filename, staticObstacleList=list(), **kwargs):
"""
Parses a map file into a list of obstacles
@param filename The file name of the map file
@return A list of obstacles
"""
polyList = kwargs.get("nodes", list())
obstacleList = list()
try:
if filename is not None:
f = open(filename, "r+")
numberOfPolys = int(f.readline())
file_ext = filename.split(".")[-1]
# determine if obstacles are dynamic
if file_ext == "obstacles":
dynamicObstacle = True
else:
dynamicObstacle = False
# loop through file and create PolyObstacle objects
for _ in range(numberOfPolys):
# parse obstacle details
polyList = list()
line = [line for line in f.readline().split()[1:]]
intList = map(lambda s: int(float(s)), line)
polyList += [
[
(
mapVal(
intList[2*i],
-29,
29,
0,
con.Configuration.xSize
),
con.Configuration.ySize - mapVal(
intList[2*i + 1],
-29,
29,
0,
con.Configuration.ySize
)
) for i in range(len(intList) / 2)
]
]
# create and append PolyObstacle to obstacleList
obstacleList += [
obstacle.PolyObstacle(
pList,
con.Configuration.screen,
dynamic=dynamicObstacle
) for pList in polyList
]
else:
# auto generate dyanmic obstacles
for pList in polyList:
obst = obstacle.PolyObstacle(
pList,
con.Configuration.screen,
dynamic=True,
start_point=kwargs.get("start_point", None),
end_point=kwargs.get("end_point", None)
)
obstacleList.append(obst)
except Exception:
print("Error occured while parsing file [{0}]!".format(filename))
finally:
return obstacleList
| apache-2.0 | 2,307,528,285,008,669,700 | 33.473684 | 73 | 0.458015 | false |
wavky/ManHourCalendar | mhcalendar/job.py | 1 | 1583 | #!/usr/bin/env python3
# @Time : 17-9-2 01:53
# @Author : Wavky Huang
# @Contact : [email protected]
# @File : job.py
"""
Process information of the job.
"""
class Job:
def __init__(self, required_manhour=0, daily_work_hours=0, hourly_pay=0, max_daily_overhours=0):
"""
Define your job's condition.
:param required_manhour: monthly manhour required by company
:param daily_work_hours: daily work hours required by company
:param hourly_pay: hourly pay offers by company
        :param max_daily_overhours: maximum overtime hours you can work per day; a negative value means unlimited
"""
self.required_manhour = required_manhour
self.daily_work_hours = daily_work_hours
self.hourly_pay = hourly_pay
self.max_daily_overhours = max_daily_overhours
if max_daily_overhours < 0:
self.max_daily_overhours = 24 - daily_work_hours
if daily_work_hours + max_daily_overhours > 24:
self.max_daily_overhours = 24 - daily_work_hours
print("daily_work_hours + max_daily_overhours > 24, max_daily_overhours has been set to {0}.".format(
self.max_daily_overhours))
def __str__(self):
return "Current Job: \t Require manhour = {0} \t Daily work hours = {1} \n\
\t\t Hourly pay = {2} \t\t Max daily overhours = {3}".format(self.required_manhour, self.daily_work_hours,
self.hourly_pay, self.max_daily_overhours)
def __repr__(self):
return self.__str__()
| mit | -7,290,178,517,325,429,000 | 37.609756 | 113 | 0.60897 | false |
exaile/exaile | xl/metadata/mka.py | 1 | 2816 | # Matroska tagger for Exaile
# Copyright (C) 2010 Johannes Sasongko <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from xl.metadata import _base, _matroska
class MkaFormat(_base.BaseFormat):
others = False # For now, stick with defined tags only.
writable = False
tag_mapping = {
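        # Exaile tag name -> (Matroska SimpleTag name, TargetTypevalue); target 50 is the
        # album/segment level, target 30 is the track level.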
'album': ('TITLE', 50),
'album artist': ('ARTIST', 50),
'artist': ('ARTIST', 30),
'comment': ('COMMENT', 30),
'composer': ('COMPOSER', 30),
'date': ('DATE_RECORDED', 50),
'discnumber': ('PART_NUMBER', 50),
'genre': ('GENRE', 30),
'performer': ('PERFORMER', 30),
'title': ('TITLE', 30),
'tracknumber': ('PART_NUMBER', 30),
}
def _get_raw(self):
return self.tags
def load(self):
mka = _matroska.parse(self.loc)
segment = mka['Segment'][0]
info = segment['Info'][0]
try:
timecodescale = info['TimecodeScale'][0]
except KeyError:
timecodescale = 1000000
length = info['Duration'][0] * timecodescale / 1e9
self.tags = tags = {'__length': length}
for mkatags in segment['Tags']:
for mkatag in mkatags['Tag']:
target = int(mkatag['Targets'][0]['TargetTypevalue'][0])
for simpletag in mkatag['SimpleTag']:
key = (simpletag['TagName'][0], target)
try:
values = tags[key]
except KeyError:
values = tags[key] = []
values.append(simpletag['TagString'][0])
# vi: et sts=4 sw=4 ts=4
| gpl-2.0 | 762,459,540,885,645,600 | 36.052632 | 81 | 0.622869 | false |
google-research/falken | service/data_store/resource_store_test.py | 1 | 9426 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for ResourceStore."""
import time
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from data_store import file_system
from data_store import resource_id
from data_store import resource_store
class ResourceStoreTest(parameterized.TestCase):
def setUp(self):
"""Create a datastore object that uses a temporary directory."""
super().setUp()
self._fs = file_system.FakeFileSystem()
self._mock_resource_encoder = mock.Mock()
self._mock_resource_resolver = mock.Mock()
self._resource_store = resource_store.ResourceStore(
self._fs, self._mock_resource_encoder,
self._mock_resource_resolver, dict)
@parameterized.named_parameters(
('Create Timestamp',
4221, 0, 0, 4221),
('Use Resource Id Timestamp',
0, 1234, 0, 1234),
('Read Resource Timestamp',
0, 0, 9876, 9876),)
@mock.patch.object(time, 'time')
def test_write(self, current_timestamp, resource_timestamp,
read_timestamp, expected_timestamp, mock_time):
"""Write an object using the current time."""
with mock.patch.object(self._fs, 'write_file') as mock_write_file:
with mock.patch.object(self._resource_store, 'read_timestamp_micros') as (
mock_read_timestamp_micros):
mock_time.return_value = current_timestamp / 1_000_000
self._mock_resource_resolver.get_timestamp_micros.return_value = (
resource_timestamp)
mock_read_timestamp_micros.return_value = read_timestamp
mock_resource = mock.Mock()
mock_resource_id = mock.MagicMock()
resource_path = 'a/resource/path'
mock_resource_id.__str__.return_value = resource_path
self._mock_resource_resolver.to_resource_id.return_value = (
mock_resource_id)
mock_resource_data = mock.Mock()
self._mock_resource_encoder.encode_resource.return_value = (
mock_resource_data)
self._resource_store.write(mock_resource)
self._mock_resource_resolver.to_resource_id.assert_called_once_with(
mock_resource)
(self._mock_resource_resolver.get_timestamp_micros
.assert_called_once_with(mock_resource))
# If the resource doesn't have have a timestamp,read the from the
# filesystem.
if not resource_timestamp:
mock_read_timestamp_micros.assert_called_once_with(mock_resource_id)
else:
mock_read_timestamp_micros.assert_not_called()
self._mock_resource_encoder.encode_resource.assert_called_once_with(
mock_resource_id, mock_resource)
mock_write_file.assert_called_once_with(
self._resource_store._get_path(resource_path, expected_timestamp),
mock_resource_data)
@parameterized.named_parameters(
('File not found', None, None, 0),
('Found file', 'a_resource', 3, 26))
def test_read(self, read_data, decoded_data, timestamp):
with mock.patch.object(self._fs, 'read_file') as mock_read_file:
with mock.patch.object(self._resource_store, 'read_timestamp_micros') as (
mock_read_timestamp_micros):
if read_data:
mock_read_file.return_value = read_data
else:
def raise_file_not_found_error(unused_path):
raise FileNotFoundError()
mock_read_file.side_effect = raise_file_not_found_error
mock_read_timestamp_micros.return_value = timestamp
self._mock_resource_encoder.decode_resource.return_value = decoded_data
mock_resource_id = 'a/resource'
if read_data:
self.assertEqual(decoded_data,
self._resource_store.read(mock_resource_id))
self._mock_resource_encoder.decode_resource.assert_called_once_with(
mock_resource_id, read_data)
else:
with self.assertRaises(resource_store.NotFoundError):
self._resource_store.read(mock_resource_id)
mock_read_file.assert_called_once_with(
self._resource_store._get_path(mock_resource_id, timestamp))
def test_read_by_proto_ids(self):
with mock.patch.object(self._resource_store, 'read') as mock_read:
with mock.patch.object(self._resource_store,
'resource_id_from_proto_ids') as (
mock_resource_id_from_proto_ids):
mock_attribute_type = mock.Mock()
mock_read.return_value = 42
mock_resource_id_from_proto_ids.return_value = 'a_resource_id'
self.assertEqual(
self._resource_store.read_by_proto_ids(
mock_attribute_type, foo='bar', bish='bosh'), 42)
mock_resource_id_from_proto_ids.assert_called_once_with(
attribute_type=mock_attribute_type, foo='bar', bish='bosh')
mock_read.assert_called_once_with('a_resource_id')
def test_list_by_proto_ids(self):
with mock.patch.object(self._resource_store, 'list') as mock_list:
with mock.patch.object(self._resource_store,
'resource_id_from_proto_ids') as (
mock_resource_id_from_proto_ids):
mock_attribute_type = mock.Mock()
list_result = ([10, 20, 30], 'a_pagination_token')
mock_list.return_value = list_result
mock_resource_id_from_proto_ids.return_value = 'a_resource_id'
self.assertEqual(
list_result,
self._resource_store.list_by_proto_ids(
attribute_type=mock_attribute_type, min_timestamp_micros=22,
page_token='previous_pagination_token', page_size=123,
time_descending=True, foo='bar', bish='bosh'))
mock_resource_id_from_proto_ids.assert_called_once_with(
attribute_type=mock_attribute_type, foo='bar', bish='bosh')
mock_list.assert_called_once_with(
'a_resource_id', min_timestamp_micros=22,
page_token='previous_pagination_token', page_size=123,
time_descending=True)
@mock.patch.object(time, 'time')
def test_get_timestamp_in_microseconds(self, mock_time):
"""Test getting the timestamp in microseconds."""
mock_time.return_value = 0.123
self.assertEqual(
resource_store.ResourceStore.get_timestamp_in_microseconds(), 123000)
@parameterized.named_parameters(
('No items', None, None, None),
('Has items', [5, 4, 3], 'page_token', 3))
def test_get_most_recent(self, listed_resource_ids, page_token,
expected_resource_id):
with mock.patch.object(self._resource_store, 'list') as mock_list:
mock_resource_id_glob = mock.Mock()
mock_list.return_value = (listed_resource_ids, page_token)
self.assertEqual(
self._resource_store.get_most_recent(mock_resource_id_glob),
expected_resource_id)
mock_list.assert_called_once_with(mock_resource_id_glob,
page_size=mock.ANY)
def test_decode_token(self):
self.assertEqual((12, 'ab'), self._resource_store._decode_token('12:ab'))
self.assertEqual((-1, ''), self._resource_store._decode_token(None))
def test_encode_token(self):
self.assertEqual('12:ab', self._resource_store._encode_token(12, 'ab'))
def test_to_resource_id(self):
mock_resource = mock.Mock()
mock_resource_id = 42
self._mock_resource_resolver.to_resource_id.return_value = mock_resource_id
self.assertEqual(self._resource_store.to_resource_id(mock_resource),
mock_resource_id)
self._mock_resource_resolver.to_resource_id.assert_called_with(
mock_resource)
@parameterized.named_parameters(
('No Attribute',
None,
dict(foo='bar', bish='bosh'),
{'foo_id': 'BAR', 'bish_id': 'BOSH'}),
('Attribute',
mock.MagicMock(),
dict(hello='goodbye', hola='adios'),
{resource_id.ATTRIBUTE: 'anattribute',
'hello_id': 'GOODBYE', 'hola_id': 'ADIOS'}),)
def test_resource_id_from_proto_ids(self, mock_attribute_type, proto_ids,
expected_resource_id):
self._mock_resource_resolver.resolve_attribute_name.return_value = (
'anattribute')
self._mock_resource_resolver.encode_proto_field.side_effect = (
lambda field, value: (field + '_id', value.upper()))
rid = self._resource_store.resource_id_from_proto_ids(
attribute_type=mock_attribute_type, **proto_ids)
self.assertEqual(rid, expected_resource_id)
if mock_attribute_type:
(self._mock_resource_resolver.resolve_attribute_name
.assert_called_once_with(mock_attribute_type))
self._mock_resource_resolver.encode_proto_field.assert_has_calls(
[mock.call(*item) for item in proto_ids.items()])
if __name__ == '__main__':
absltest.main()
| apache-2.0 | -872,606,445,756,310,800 | 41.845455 | 80 | 0.642372 | false |
pjdelport/django-cities | cities/conf.py | 1 | 12698 | # -*- coding: utf-8 -*-
from importlib import import_module
from collections import defaultdict
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext_lazy as _
__all__ = [
'city_types', 'district_types',
'import_opts', 'import_opts_all', 'HookException', 'settings',
'ALTERNATIVE_NAME_TYPES', 'CONTINENT_DATA', 'CURRENCY_SYMBOLS',
'INCLUDE_AIRPORT_CODES', 'INCLUDE_NUMERIC_ALTERNATIVE_NAMES',
'NO_LONGER_EXISTENT_COUNTRY_CODES', 'SKIP_CITIES_WITH_EMPTY_REGIONS',
'SLUGIFY_FUNCTION', 'VALIDATE_POSTAL_CODES',
]
url_bases = {
'geonames': {
'dump': 'http://download.geonames.org/export/dump/',
'zip': 'http://download.geonames.org/export/zip/',
},
}
files = {
'country': {
'filename': 'countryInfo.txt',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'code',
'code3',
'codeNum',
'fips',
'name',
'capital',
'area',
'population',
'continent',
'tld',
'currencyCode',
'currencyName',
'phone',
'postalCodeFormat',
'postalCodeRegex',
'languages',
'geonameid',
'neighbours',
'equivalentFips'
]
},
'region': {
'filename': 'admin1CodesASCII.txt',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'code',
'name',
'asciiName',
'geonameid',
]
},
'subregion': {
'filename': 'admin2Codes.txt',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'code',
'name',
'asciiName',
'geonameid',
]
},
'city': {
'filename': 'cities5000.zip',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'geonameid',
'name',
'asciiName',
'alternateNames',
'latitude',
'longitude',
'featureClass',
'featureCode',
'countryCode',
'cc2',
'admin1Code',
'admin2Code',
'admin3Code',
'admin4Code',
'population',
'elevation',
'gtopo30',
'timezone',
'modificationDate'
]
},
'hierarchy': {
'filename': 'hierarchy.zip',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'parent',
'child',
'type',
]
},
'alt_name': {
'filename': 'alternateNames.zip',
'urls': [url_bases['geonames']['dump'] + '{filename}', ],
'fields': [
'nameid',
'geonameid',
'language',
'name',
'isPreferred',
'isShort',
'isColloquial',
'isHistoric',
]
},
'postal_code': {
'filename': 'allCountries.zip',
'urls': [url_bases['geonames']['zip'] + '{filename}', ],
'fields': [
'countryCode',
'postalCode',
'placeName',
'admin1Name',
'admin1Code',
'admin2Name',
'admin2Code',
'admin3Name',
'admin3Code',
'latitude',
'longitude',
'accuracy',
]
}
}
country_codes = [
'AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AO', 'AQ', 'AR', 'AS', 'AT', 'AU', 'AW', 'AX', 'AZ',
'BA', 'BB', 'BD', 'BE', 'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', 'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ',
'CA', 'CC', 'CD', 'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ',
'DE', 'DJ', 'DK', 'DM', 'DO', 'DZ',
'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET',
'FI', 'FJ', 'FK', 'FM', 'FO', 'FR',
'GA', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', 'GT', 'GU', 'GW', 'GY',
'HK', 'HM', 'HN', 'HR', 'HT', 'HU',
'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT',
'JE', 'JM', 'JO', 'JP',
'KE', 'KG', 'KH', 'KI', 'KM', 'KN', 'KP', 'KR', 'XK', 'KW', 'KY', 'KZ',
'LA', 'LB', 'LC', 'LI', 'LK', 'LR', 'LS', 'LT', 'LU', 'LV', 'LY',
'MA', 'MC', 'MD', 'ME', 'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ',
'NA', 'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ',
'OM',
'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', 'PN', 'PR', 'PS', 'PT', 'PW', 'PY',
'QA',
'RE', 'RO', 'RS', 'RU', 'RW',
'SA', 'SB', 'SC', 'SD', 'SS', 'SE', 'SG', 'SH', 'SI', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'ST', 'SV', 'SX', 'SY', 'SZ',
'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV', 'TW', 'TZ',
'UA', 'UG', 'UM', 'US', 'UY', 'UZ',
'VA', 'VC', 'VE', 'VG', 'VI', 'VN', 'VU',
'WF', 'WS',
'YE', 'YT',
'ZA', 'ZM', 'ZW',
]
_ALTERNATIVE_NAME_TYPES = (
('name', _("Name")),
('abbr', _("Abbreviation")),
('link', _("Link")),
)
_AIRPORT_TYPES = (
('iata', _("IATA (Airport) Code")),
('icao', _("ICAO (Airport) Code")),
('faac', _("FAAC (Airport) Code")),
)
CONTINENT_DATA = {
'AF': ('Africa', 6255146),
'AS': ('Asia', 6255147),
'EU': ('Europe', 6255148),
'NA': ('North America', 6255149),
'OC': ('Oceania', 6255151),
'SA': ('South America', 6255150),
'AN': ('Antarctica', 6255152),
}
_CURRENCY_SYMBOLS = {
"AED": "د.إ", "AFN": "؋", "ALL": "L", "AMD": "դր.", "ANG": "ƒ", "AOA": "Kz",
"ARS": "$", "AUD": "$", "AWG": "ƒ", "AZN": "m",
"BAM": "KM", "BBD": "$", "BDT": "৳", "BGN": "лв", "BHD": "ب.د", "BIF": "Fr",
"BMD": "$", "BND": "$", "BOB": "Bs.", "BRL": "R$", "BSD": "$", "BTN": "Nu",
"BWP": "P", "BYR": "Br", "BZD": "$",
"CAD": "$", "CDF": "Fr", "CHF": "Fr", "CLP": "$", "CNY": "¥", "COP": "$",
"CRC": "₡", "CUP": "$", "CVE": "$, Esc", "CZK": "Kč",
"DJF": "Fr", "DKK": "kr", "DOP": "$", "DZD": "د.ج",
"EEK": "KR", "EGP": "£,ج.م", "ERN": "Nfk", "ETB": "Br", "EUR": "€",
"FJD": "$", "FKP": "£",
"GBP": "£", "GEL": "ლ", "GHS": "₵", "GIP": "£", "GMD": "D", "GNF": "Fr",
"GTQ": "Q", "GYD": "$",
"HKD": "$", "HNL": "L", "HRK": "kn", "HTG": "G", "HUF": "Ft",
"IDR": "Rp", "ILS": "₪", "INR": "₨", "IQD": "ع.د", "IRR": "﷼", "ISK": "kr",
"JMD": "$", "JOD": "د.ا", "JPY": "¥",
"KES": "Sh", "KGS": "лв", "KHR": "៛", "KMF": "Fr", "KPW": "₩", "KRW": "₩",
"KWD": "د.ك", "KYD": "$", "KZT": "Т",
"LAK": "₭", "LBP": "ل.ل", "LKR": "ரூ", "LRD": "$", "LSL": "L", "LTL": "Lt",
"LVL": "Ls", "LYD": "ل.د",
"MAD": "د.م.", "MDL": "L", "MGA": "Ar", "MKD": "ден", "MMK": "K",
"MNT": "₮", "MOP": "P", "MRO": "UM", "MUR": "₨", "MVR": "ރ.", "MWK": "MK",
"MXN": "$", "MYR": "RM", "MZN": "MT",
"NAD": "$", "NGN": "₦", "NIO": "C$", "NOK": "kr", "NPR": "₨", "NZD": "$",
"OMR": "ر.ع.",
"PAB": "B/.", "PEN": "S/.", "PGK": "K", "PHP": "₱", "PKR": "₨", "PLN": "zł",
"PYG": "₲",
"QAR": "ر.ق",
"RON": "RON", "RSD": "RSD", "RUB": "р.", "RWF": "Fr",
"SAR": "ر.س", "SBD": "$", "SCR": "₨", "SDG": "S$", "SEK": "kr", "SGD": "$",
"SHP": "£", "SLL": "Le", "SOS": "Sh", "SRD": "$", "STD": "Db",
"SYP": "£, ل.س", "SZL": "L",
"THB": "฿", "TJS": "ЅМ", "TMT": "m", "TND": "د.ت", "TOP": "T$", "TRY": "₤",
"TTD": "$", "TWD": "$", "TZS": "Sh",
"UAH": "₴", "UGX": "Sh", "USD": "$", "UYU": "$", "UZS": "лв",
"VEF": "Bs", "VND": "₫", "VUV": "Vt",
"WST": "T",
"XAF": "Fr", "XCD": "$", "XOF": "Fr", "XPF": "Fr",
"YER": "﷼",
"ZAR": "R", "ZMK": "ZK", "ZWL": "$",
}
_NO_LONGER_EXISTENT_COUNTRY_CODES = ['CS', 'AN']
_SLUGIFY_FUNCTION = getattr(django_settings, 'CITIES_SLUGIFY_FUNCTION', 'cities.util.default_slugify')
# See http://www.geonames.org/export/codes.html
city_types = ['PPL', 'PPLA', 'PPLC', 'PPLA2', 'PPLA3', 'PPLA4', 'PPLG']
district_types = ['PPLX']
# Command-line import options
import_opts = [
'all',
'country',
'region',
'subregion',
'city',
'district',
'alt_name',
'postal_code',
]
import_opts_all = [
'country',
'region',
'subregion',
'city',
'district',
'alt_name',
'postal_code',
]
# Raise inside a hook (with an error message) to skip the current line of data.
class HookException(Exception):
pass
# Hook functions that a plugin class may define
plugin_hooks = [
'country_pre', 'country_post', # noqa: E241
'region_pre', 'region_post', # noqa: E241
'subregion_pre', 'subregion_post', # noqa: E241
'city_pre', 'city_post', # noqa: E241
'district_pre', 'district_post', # noqa: E241
'alt_name_pre', 'alt_name_post', # noqa: E241
'postal_code_pre', 'postal_code_post', # noqa: E241
]
def create_settings():
def get_locales(self):
if hasattr(django_settings, "CITIES_LOCALES"):
locales = django_settings.CITIES_LOCALES[:]
else:
locales = ['en', 'und']
try:
locales.remove('LANGUAGES')
locales += [e[0] for e in django_settings.LANGUAGES]
except:
pass
return set([e.lower() for e in locales])
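    # Build a lightweight settings object: 'locales' is a property, so CITIES_LOCALES and
    # LANGUAGES are re-read from the Django settings on every access.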
res = type('settings', (), {
'locales': property(get_locales),
})
res.files = files.copy()
if hasattr(django_settings, "CITIES_FILES"):
for key in list(django_settings.CITIES_FILES.keys()):
if 'filenames' in django_settings.CITIES_FILES[key] and 'filename' in django_settings.CITIES_FILES[key]:
raise ImproperlyConfigured(
"Only one key should be specified for '%s': 'filename' of 'filenames'. Both specified instead" % key
)
res.files[key].update(django_settings.CITIES_FILES[key])
if 'filenames' in django_settings.CITIES_FILES[key]:
del res.files[key]['filename']
if hasattr(django_settings, "CITIES_DATA_DIR"):
res.data_dir = django_settings.CITIES_DATA_DIR
if hasattr(django_settings, "CITIES_POSTAL_CODES"):
res.postal_codes = set([e.upper() for e in django_settings.CITIES_POSTAL_CODES])
else:
res.postal_codes = set(['ALL'])
return res()
def create_plugins():
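    # Import each configured plugin class and register an instance for every hook it implements.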
settings.plugins = defaultdict(list)
for plugin in django_settings.CITIES_PLUGINS:
module_path, classname = plugin.rsplit('.', 1)
module = import_module(module_path)
class_ = getattr(module, classname)
obj = class_()
[settings.plugins[hook].append(obj) for hook in plugin_hooks if hasattr(obj, hook)]
settings = create_settings()
if hasattr(django_settings, "CITIES_PLUGINS"):
create_plugins()
if hasattr(django_settings, 'CITIES_IGNORE_EMPTY_REGIONS'):
raise Exception("CITIES_IGNORE_EMPTY_REGIONS was ambiguous and has been moved to CITIES_SKIP_CITIES_WITH_EMPTY_REGIONS")
SKIP_CITIES_WITH_EMPTY_REGIONS = getattr(django_settings, 'CITIES_SKIP_CITIES_WITH_EMPTY_REGIONS', False)
# Users may way to import historical countries
NO_LONGER_EXISTENT_COUNTRY_CODES = getattr(
django_settings, 'CITIES_NO_LONGER_EXISTENT_COUNTRY_CODES',
_NO_LONGER_EXISTENT_COUNTRY_CODES)
# Users may not want to include airport codes as alternative city names
INCLUDE_AIRPORT_CODES = getattr(django_settings, 'CITIES_INCLUDE_AIRPORT_CODES', True)
if INCLUDE_AIRPORT_CODES:
_ALTERNATIVE_NAME_TYPES += _AIRPORT_TYPES
# A `Choices` object (from `django-model-utils`)
ALTERNATIVE_NAME_TYPES = getattr(django_settings, 'CITIES_ALTERNATIVE_NAME_TYPES', _ALTERNATIVE_NAME_TYPES)
INCLUDE_NUMERIC_ALTERNATIVE_NAMES = getattr(django_settings, 'CITIES_INCLUDE_NUMERIC_ALTERNATIVE_NAMES', True)
# Allow users to override specified contents
CONTINENT_DATA.update(getattr(django_settings, 'CITIES_CONTINENT_DATA', {}))
CURRENCY_SYMBOLS = getattr(django_settings, 'CITIES_CURRENCY_SYMBOLS', _CURRENCY_SYMBOLS)
module_name, _, function_name = _SLUGIFY_FUNCTION.rpartition('.')
SLUGIFY_FUNCTION = getattr(import_module(module_name), function_name)
# Users who want better postal codes can flip this on (developers of
# django-cities itself probably will), but most probably won't want to
VALIDATE_POSTAL_CODES = getattr(django_settings, 'CITIES_VALIDATE_POSTAL_CODES', False)
| mit | -4,502,327,756,119,079,400 | 33.479452 | 141 | 0.487167 | false |
kamekoopa/git-lab | git_lab/apis/mergerequest/repositories.py | 1 | 2549 | # -*- coding: utf-8 -*-
from git_lab.apis.mergerequest.models import MergeRequest, Note
class MergeRequestRepository(object):
def __init__(self, client=None, project=None):
u"""
        @param client : GitLab client
@type client : gitlab.Gitlab
"""
from git_lab.utils import get_client, get_project
self.client = client if client is not None else get_client()
self.project = project if project is not None else get_project()
def get_request(self, req_id):
mr = self.client.getmergerequest(self.project, req_id)
if mr is False:
return None
else:
return MergeRequest(mr)
def get_notes(self, req_id):
u"""指定されたマージリクエストIDに紐づくノートの一覧を取得します
@param req_id : マージリクエストID
@type req_id : int
@return : ノートのリスト
@rtype : list of Note
"""
notes = self.client.getmergerequestnotes(self.project, req_id)
if notes is False:
return []
else:
results = []
for note in notes:
results.append(Note(note))
return results
def get_requests(self, page=1, per_page=20):
u"""
        @param page : page number
        @type page : int
        @param per_page : number of results per page
        @type per_page : int
        @return : list of merge requests
@rtype : list of MergeRequest
"""
mrs = self.client.getmergerequests(self.project, page=page, per_page=per_page)
if mrs is False:
return []
else:
result = []
for mr in mrs:
result.append(MergeRequest(mr))
return result
def create_requests(self, source_branch, target_project_id, target_branch, title):
u"""
        @param source_branch : source branch
        @type source_branch : str
        @param target_project_id : target project ID
        @type target_project_id : str | None
        @param target_branch : target branch
        @type target_branch : str
        @param title : title
        @type title : str
        @return : True on success, False otherwise
@rtype : bool
"""
return self.client.createmergerequest2(
self.project,
source_branch,
target_project_id,
target_branch,
title
)
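# Illustrative usage sketch (not part of the original module; assumes the helper
# functions above resolve a valid GitLab client and project):
#
#   repo = MergeRequestRepository()
#   merge_requests = repo.get_requests(page=1, per_page=20)
#   notes = repo.get_notes(req_id=1)  # req_id is a hypothetical example value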
| apache-2.0 | -8,188,333,652,436,813,000 | 24.150538 | 86 | 0.545105 | false |
qateam123/eq | app/utilities/schema.py | 1 | 1339 | import logging
from app import cache
from app.parser.v0_0_1.schema_parser import SchemaParser
from app.schema_loader.schema_loader import load_schema
logger = logging.getLogger(__name__)
def get_schema(metadata):
"""
Get the schema for the current user
:return: (json, schema) # Tuple of json and schema object from schema file
"""
eq_id = metadata["eq_id"]
form_type = metadata["form_type"]
language_code = metadata["language_code"] if "language_code" in metadata else None
logger.debug("Requested questionnaire %s for form type %s", eq_id, form_type)
json_schema, schema = load_and_parse_schema(eq_id, form_type, language_code)
return json_schema, schema
@cache.memoize()
def load_and_parse_schema(eq_id, form_type, language_code):
"""
Use the schema loader to get the schema from disk. Then use the parse to construct the object schema
:param eq_id: the id of the questionnaire
:param form_type: the form type
:return: (json, schema) # Tuple of json and schema object from schema file
"""
# load the schema
json_schema = load_schema(eq_id, form_type, language_code)
if json_schema:
parser = SchemaParser(json_schema)
schema = parser.parse()
return json_schema, schema
else:
raise ValueError("No schema available")
| mit | 4,259,916,970,787,906,000 | 30.880952 | 104 | 0.68708 | false |
Smashman/UKNetrunnerRankings | app/tournament/models.py | 1 | 1715 | import datetime
from app import db
class Tournament(db.Model):
id = db.Column(db.Integer, primary_key=True)
upload_user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
uploaded = db.Column(db.DateTime, default=datetime.datetime.utcnow)
date = db.Column(db.Date)
type = db.Column(db.Enum('sc', 'regi', 'nati'))
location = db.Column(db.String(1024))
mwl = db.Column(db.Boolean)
filename = db.Column(db.String(256))
file_type = db.Column(db.Enum('txt', 'json'))
participants = db.relationship('Participant', backref=db.backref('tournament'))
upload_user = db.relationship('User', backref='tournament')
def __init__(self, filename):
self.filename = filename
self.file_type = filename.split('.')[-1]
class Participant(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
tournament_id = db.Column(db.Integer, db.ForeignKey('tournament.id'))
runner_ident_id = db.Column(db.Integer, db.ForeignKey('identity.id'))
corp_ident_id = db.Column(db.Integer, db.ForeignKey('identity.id'))
user = db.relationship('User', backref='participant')
runner_ident = db.relationship('Identity', foreign_keys=runner_ident_id)
corp_ident = db.relationship('Identity', foreign_keys=corp_ident_id)
class Result(db.Model):
id = db.Column(db.Integer, primary_key=True)
participant_id = db.Column(db.Integer, db.ForeignKey('participant.id'))
position = db.Column(db.Integer)
points = db.Column(db.Integer)
strength_of_schedule = db.Column(db.Float)
extended_sos = db.Column(db.Float)
participant = db.relationship('Participant', backref='result') | gpl-2.0 | -491,337,908,336,923,500 | 38 | 83 | 0.683965 | false |
michael-lazar/praw3 | setup.py | 1 | 2609 | """praw setup.py"""
import re
from codecs import open
from os import path
from setuptools import setup
PACKAGE_NAME = 'praw'
HERE = path.abspath(path.dirname(__file__))
with open(path.join(HERE, 'README.rst'), encoding='utf-8') as fp:
README = fp.read()
with open(path.join(HERE, PACKAGE_NAME, '__init__.py'),
encoding='utf-8') as fp:
VERSION = re.search("__version__ = '([^']+)'", fp.read()).group(1)
dependencies = ['decorator >=4.0.9, <4.1',
'requests >=2.4.0',
'six ==1.10']
try:
from OpenSSL import __version__ as opensslversion
opensslversion = [int(minor) if minor.isdigit() else minor
for minor in opensslversion.split('.')]
if opensslversion < [0, 15]: # versions less than 0.15 have a regression
dependencies.append('pyopenssl >=0.15')
except ImportError:
pass # open ssl not installed
setup(name=PACKAGE_NAME,
author='Timothy Mellor',
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Utilities'],
description=('PRAW, an acronym for `Python Reddit API Wrapper`, is a '
'python package that allows for simple access to '
'reddit\'s API.'),
entry_points={'console_scripts': [
'praw-multiprocess = praw.multiprocess:run']},
install_requires=dependencies,
keywords='reddit api wrapper',
license='GPLv3',
long_description=README,
maintainer='Michael Lazar',
maintainer_email='[email protected]',
package_data={'': ['COPYING'], PACKAGE_NAME: ['*.ini']},
packages=[PACKAGE_NAME],
tests_require=['betamax >=0.5.1, <0.6',
'betamax-matchers >=0.2.0, <0.3',
'betamax-serializers >=0.1.1, <0.2',
'mock ==1.0.1'],
test_suite='tests',
url='https://praw.readthedocs.io/',
version=VERSION)
| gpl-3.0 | -5,172,743,039,084,221,000 | 37.940299 | 77 | 0.579149 | false |
sadjadasghari/deeplab4a2d | loss_from_log.py | 1 | 5089 | #!/usr/bin/env python
# Martin Kersner, 2016/03/11
from __future__ import print_function
import sys
import re
import numpy as np
import matplotlib.pyplot as plt
from utils import strstr
def main():
output_data, log_files = process_arguments(sys.argv)
train_iteration = []
train_loss = []
train_accuracy0 = []
train_accuracy1 = []
train_accuracy2 = []
train_accuracy3 = []
train_accuracy4 = []
train_accuracy5 = []
base_train_iter = 0
for log_file in log_files:
with open(log_file, 'rb') as f:
if len(train_iteration) != 0:
base_train_iter = train_iteration[-1]
for line in f:
if strstr(line, 'Iteration') and strstr(line, 'loss'):
matched = match_loss(line)
train_loss.append(float(matched.group(1)))
matched = match_iteration(line)
train_iteration.append(int(matched.group(1))+base_train_iter)
# strong labels
elif strstr(line, 'Train net output #0: accuracy '):
matched = match_net_accuracy(line)
train_accuracy0.append(float(matched.group(1)))
elif strstr(line, 'Train net output #1: accuracy '):
matched = match_net_accuracy(line)
train_accuracy1.append(float(matched.group(1)))
elif strstr(line, 'Train net output #2: accuracy '):
matched = match_net_accuracy(line)
train_accuracy2.append(float(matched.group(1)))
# weak labels
elif strstr(line, 'Train net output #0: accuracy_bbox'):
matched = match_net_accuracy_bbox(line)
train_accuracy0.append(float(matched.group(1)))
elif strstr(line, 'Train net output #1: accuracy_bbox'):
matched = match_net_accuracy_bbox(line)
train_accuracy1.append(float(matched.group(1)))
elif strstr(line, 'Train net output #2: accuracy_bbox'):
matched = match_net_accuracy_bbox(line)
train_accuracy2.append(float(matched.group(1)))
elif strstr(line, 'Train net output #3: accuracy_strong'):
matched = match_net_accuracy_strong(line)
train_accuracy3.append(float(matched.group(1)))
elif strstr(line, 'Train net output #4: accuracy_strong'):
matched = match_net_accuracy_strong(line)
train_accuracy4.append(float(matched.group(1)))
elif strstr(line, 'Train net output #5: accuracy_strong'):
matched = match_net_accuracy_strong(line)
train_accuracy5.append(float(matched.group(1)))
if output_data == 'loss':
for x in train_loss:
print(x)
if output_data == 'acc1':
for x,y,z in zip(train_accuracy0, train_accuracy1, train_accuracy2):
print(x, y, z)
if output_data == 'acc2':
for x,y,z in zip(train_accuracy3, train_accuracy4, train_accuracy5):
print(x, y, z)
## loss
plt.plot(train_iteration, train_loss, 'k', label='Train loss')
plt.legend()
plt.ylabel('Loss')
plt.xlabel('Number of iterations')
plt.savefig('loss.png')
## evaluation
plt.clf()
if len(train_accuracy3) != 0:
plt.plot(range(len(train_accuracy0)), train_accuracy0, 'k', label='accuracy bbox 0')
plt.plot(range(len(train_accuracy1)), train_accuracy1, 'r', label='accuracy bbox 1')
plt.plot(range(len(train_accuracy2)), train_accuracy2, 'g', label='accuracy bbox 2')
plt.plot(range(len(train_accuracy3)), train_accuracy3, 'b', label='accuracy strong 0')
plt.plot(range(len(train_accuracy4)), train_accuracy4, 'c', label='accuracy strong 1')
plt.plot(range(len(train_accuracy5)), train_accuracy5, 'm', label='accuracy strong 2')
else:
plt.plot(range(len(train_accuracy0)), train_accuracy0, 'k', label='train accuracy 0')
plt.plot(range(len(train_accuracy1)), train_accuracy1, 'r', label='train accuracy 1')
plt.plot(range(len(train_accuracy2)), train_accuracy2, 'g', label='train accuracy 2')
plt.legend(loc=0)
plt.savefig('evaluation.png')
def match_iteration(line):
return re.search(r'Iteration (.*),', line)
def match_loss(line):
return re.search(r'loss = (.*)', line)
def match_net_accuracy(line):
return re.search(r'accuracy = (.*)', line)
def match_net_accuracy_bbox(line):
return re.search(r'accuracy_bbox = (.*)', line)
def match_net_accuracy_strong(line):
return re.search(r'accuracy_strong = (.*)', line)
def process_arguments(argv):
if len(argv) < 2:
help()
output_data = None
log_files = argv[2:]
if argv[1].lower() == 'loss':
output_data = 'loss'
elif argv[1].lower() == 'acc1':
output_data = 'acc1'
elif argv[1].lower() == 'acc2':
output_data = 'acc2'
else:
log_files = argv[1:]
return output_data, log_files
def help():
print('Usage: python loss_from_log.py [OUTPUT_TYPE] [LOG_FILE]+\n'
        'OUTPUT_TYPE can be either loss, acc1 or acc2\n'
        'LOG_FILE is a text file containing a log produced by caffe.\n'
        'At least one LOG_FILE has to be specified.\n'
        'Files have to be given in correct order (the oldest logs as the first ones).'
, file=sys.stderr)
exit()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,591,932,198,186,486,000 | 31.208861 | 90 | 0.63706 | false |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/Quandl/Quandl.py | 1 | 15310 |
"""
Quandl's API for Python.
Currently supports getting, searching, and pushing datasets.
"""
from __future__ import (print_function, division, absolute_import,
unicode_literals)
import pickle
import datetime
import json
import pandas as pd
import re
from dateutil import parser
from numpy import genfromtxt
import requests
try:
from urllib.error import HTTPError # Python 3
from urllib.parse import urlencode
from urllib.request import Request, urlopen
strings = str
except ImportError:
from urllib import urlencode # Python 2
from urllib2 import HTTPError, Request, urlopen
strings = unicode
#Base API call URL
QUANDL_API_URL = 'https://www.quandl.com/api/v1/'
VERSION = '2.8.7'
def get(dataset, **kwargs):
"""Return dataframe of requested dataset from Quandl.
:param dataset: str or list, depending on single dataset usage or multiset usage
Dataset codes are available on the Quandl website
:param str authtoken: Downloads are limited to 10 unless token is specified
    :param str trim_start, trim_end: Optional date filters, otherwise entire
dataset is returned
:param str collapse: Options are daily, weekly, monthly, quarterly, annual
:param str transformation: options are diff, rdiff, cumul, and normalize
:param int rows: Number of rows which will be returned
:param str sort_order: options are asc, desc. Default: `asc`
:param str returns: specify what format you wish your dataset returned as,
either `numpy` for a numpy ndarray or `pandas`. Default: `pandas`
:param bool verbose: specify whether to print output text to stdout, default is False.
:param str text: Deprecated. Use `verbose` instead.
:returns: :class:`pandas.DataFrame` or :class:`numpy.ndarray`
Note that Pandas expects timeseries data to be sorted ascending for most
timeseries functionality to work.
Any other `kwargs` passed to `get` are sent as field/value params to Quandl
with no interference.
"""
#Check whether dataset is given as a string (for a single dataset) or an array (for a multiset call)
#Unicode String
if type(dataset) == strings or type(dataset) == str:
if '.' in dataset:
dataset_temp = dataset.split('.')
dataset = dataset_temp[0]
dataset_columns = dataset_temp[1]
kwargs.update({'column':dataset_columns})
url = QUANDL_API_URL + 'datasets/{}.csv?'.format(dataset)
#Array
elif type(dataset) == list:
multiple_dataset_dataframe = pd.DataFrame()
for i in dataset:
try:
d = get(i,**kwargs)
except DatasetNotFound:
d = pd.DataFrame({'NOT FOUND': []})
# format dataset name for column name
specific_column_name = i.split('.')[0].replace('/','.')
d.rename(columns = lambda x: specific_column_name + ' - ' + x, inplace = True)
multiple_dataset_dataframe = pd.merge(multiple_dataset_dataframe,d,right_index = True,left_index = True,how='outer')
return multiple_dataset_dataframe
#If wrong format
else:
error = "Your dataset must either be specified as a string (containing a Quandl code) or an array (of Quandl codes)"
raise WrongFormat(error)
#set parameters
kwargs.setdefault('sort_order', 'asc')
verbose = kwargs.get('verbose', False)
if 'text' in kwargs:
print('Deprecated: "text" is deprecated and will be removed in next release, use "verbose" instead.')
if isinstance(kwargs['text'], (strings, str)):
if kwargs['text'].lower() in ['yes', 'y', 't', 'true', 'on']:
verbose = True
else:
verbose = bool(kwargs['text'])
auth_token = _getauthtoken(kwargs.pop('authtoken', ''), verbose)
trim_start = _parse_dates(kwargs.pop('trim_start', None))
trim_end = _parse_dates(kwargs.pop('trim_end', None))
returns = kwargs.get('returns', 'pandas')
#Append all parameters to API call
url = _append_query_fields(url,
auth_token=auth_token,
trim_start=trim_start,
trim_end=trim_end,
**kwargs)
if returns == 'url':
return url # for test purpose
try:
urldata = _download(url)
if verbose and verbose != 'no':
print("Returning Dataframe for ", dataset)
#Error catching
except HTTPError as e:
#API limit reached
if str(e) == 'HTTP Error 403: Forbidden':
error = 'API daily call limit exceeded. Contact us at [email protected] if you want an increased daily limit'
raise CallLimitExceeded(error)
#Dataset not found
elif str(e) == 'HTTP Error 404: Not Found':
error = "Dataset not found. Check Quandl code: {} for errors".format(dataset)
raise DatasetNotFound(error)
#Catch all
else:
if verbose and verbose != 'no':
print("url:", url)
error = "Error Downloading! {}".format(e)
raise ErrorDownloading(error)
if returns == 'numpy':
return urldata.to_records()
return urldata
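# Illustrative usage sketch (not part of the original module; the dataset codes and
# token below are placeholders):
#
#   df = get("WIKI/AAPL", trim_start="2015-01-01", trim_end="2015-12-31",
#            collapse="monthly", authtoken="YOUR_TOKEN")
#   arr = get(["WIKI/AAPL.4", "WIKI/MSFT.4"], returns="numpy")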
def push(data, code, name, authtoken='', desc='', override=False, verbose=False, text=None):
    ''' Upload a pandas DataFrame to Quandl and return a link to the dataset.
:param data: (required), pandas ts or numpy array
:param str code: (required), Dataset code
must consist of only capital letters, numbers, and underscores
:param str name: (required), Dataset name
:param str authtoken: (required), to upload data
:param str desc: (optional), Description of dataset
:param bool verbose: specify whether to print output text to stdout, default is False.
:param str text: Deprecated. Use `verbose` instead.
:returns: :str: link to uploaded dataset'''
if text is not None:
print('Deprecated: "text" is deprecated and will be removed in next release, use "verbose" instead.')
verbose = text
token = _getauthtoken(authtoken, verbose)
if token == '':
error = ("You need an API token to upload your data to Quandl, "
"please see www.quandl.com/API for more information.")
raise MissingToken(error)
#test that code is acceptable format
_pushcodetest(code)
datestr = ''
# Verify and format the data for upload.
if not isinstance(data, pd.core.frame.DataFrame):
error = "only pandas DataFrames are accepted for upload at this time"
raise ValueError(error)
# check if indexed by date
data_interm = data.to_records()
index = data_interm.dtype.names
datestr += ','.join(index) + '\n'
#format data for uploading
for i in data_interm:
# Check if index is a date
if isinstance(i[0], datetime.datetime):
datestr += i[0].date().isoformat()
else:
try:
datestr += _parse_dates(str(i[0]))
except ValueError:
error = ("Please check your indices, one of them is "
"not a recognizable date")
raise DateNotRecognized(error)
for n in i:
if isinstance(n, (float, int)):
datestr += ',' + str(n)
datestr += '\n'
url = QUANDL_API_URL + 'datasets'
# Try to POST, will fail if dataset already exists
params = {'name': name,
'code': code,
'description': desc,
'data': datestr,
'auth_token': token}
resp = requests.post(url,params = params)
rtn = resp
if resp.status_code == 200 or resp.status_code == 422:
user_source_code = resp.json()['source_code']
#Catch failed POST and PUT instead to update
if resp.json()['errors']['code'] == [u'has already been taken']:
# make put request
url += "/{user_source_code}/{code}".format(**locals())
put_data = {'name':name,
'descripton':desc,
'data':datestr,
'auth_token':token
}
put_resp = requests.put(url,params =put_data)
rtn = put_resp
elif resp.status_code == 401:
error = ("Authtoken does not corrrospond to a Quandl Account"
" please make sure you are using the correct token")
raise TokenError(error)
elif resp.status_code in range(500,600):
error = ("Server Error. We apologize, something went wrong on our end. Please try call again in a minute, and email us at [email protected] if this error persists.")
raise QuandlError(error)
else:
error = ("Unknown response ({r}). Please copy and paste the error output, and the file you are running, and email us at [email protected]. We will help you fix your error ASAP.".format(str(resp.status_code)))
raise UnknownError(error)
#Return JSON response of uploaded dataset
return rtn
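# Illustrative usage sketch (not part of the original module; the code/name/token
# values are placeholders, and the code must be capital letters, numbers or underscores):
#
#   df = pd.DataFrame({"value": [1.0, 2.0]},
#                     index=pd.to_datetime(["2015-01-01", "2015-01-02"]))
#   push(df, code="MY_DATASET", name="My dataset", authtoken="YOUR_TOKEN")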
def search(query, source=None, page=1, authtoken=None, verbose=True, prints=None):
"""Return array of dictionaries of search results.
:param str query: (required), query to search with
:param str source: (optional), source to search
    :param int page: (optional), positive page number of search results
    :param str authtoken: (optional) Quandl auth token for extended API access
:returns: :array: search results
"""
if prints is not None:
print('Deprecated: "prints" is depreciated and will be removed in next release, use "verbose" instead.')
verbose = prints
token = _getauthtoken(authtoken, verbose)
search_url = 'https://www.quandl.com/api/v1/datasets.json?request_source=python&request_version=' + VERSION + '&query='
#parse query for proper API submission
parsedquery = re.sub(" ", "+", query)
parsedquery = re.sub("&", "+", parsedquery)
url = search_url + parsedquery
#Use authtoken if present
if token:
url += '&auth_token=' + token
#Add search source if given
if source:
url += '&source_code=' + source
#Page to be searched
url += '&page=' + str(page)
text= urlopen(url).read().decode("utf-8")
data = json.loads(text)
try:
datasets = data['docs']
except TypeError:
raise TypeError("There are no matches for this search")
datalist = []
for i in range(len(datasets)):
temp_dict ={}
temp_dict['name'] = datasets[i]['name']
temp_dict['code'] = datasets[i]['source_code'] + '/' + datasets[i]['code']
temp_dict['desc'] = datasets[i]['description']
temp_dict['freq'] = datasets[i]['frequency']
temp_dict['colname'] = datasets[i]['column_names']
datalist.append(temp_dict)
if verbose and i < 4:
print('{0:20} : {1:50}'.format('Name',temp_dict['name']))
print('{0:20} : {1:50}'.format('Quandl Code',temp_dict['code']))
print('{0:20} : {1:50}'.format('Description',temp_dict['desc']))
print('{0:20} : {1:50}'.format('Frequency',temp_dict['freq']))
print('{0:20} : {1:50}'.format('Column Names',str(temp_dict['colname'])))
print('\n\n')
return datalist
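# Illustrative usage sketch (not part of the original module; the query and source
# values are placeholders):
#
#   results = search("crude oil", source="DOE", page=1, verbose=False)
#   codes = [r["code"] for r in results]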
# format date, if None returns None
def _parse_dates(date):
if date is None:
return date
if isinstance(date, datetime.datetime):
return date.date().isoformat()
if isinstance(date, datetime.date):
return date.isoformat()
try:
date = parser.parse(date)
except ValueError:
raise ValueError("{} is not recognised a date.".format(date))
return date.date().isoformat()
# Download data into pandas dataframe
def _download(url):
dframe = pd.read_csv(url, index_col=0, parse_dates=True)
return dframe
#Push data to Quandl. Returns json of HTTP push.
def _htmlpush(url, raw_params):
page = url
params = urlencode(raw_params)
request = Request(page, params)
page = urlopen(request)
return json.loads(page.read())
#Test if code is capitalized alphanumeric
def _pushcodetest(code):
regex = re.compile('[^0-9A-Z_]')
if regex.search(code):
error = ("Your Quandl Code for uploaded data must consist of only "
"capital letters, underscores and numbers.")
raise CodeFormatError(error)
return code
def _getauthtoken(token,text):
"""Return and save API token to a pickle file for reuse."""
if token:
return token
try:
savedtoken = pickle.load(open('authtoken.p', 'rb'))
except IOError:
savedtoken = False
if token:
try:
pickle.dump(token, open('authtoken.p', 'wb'))
if text == "no" or text == False:
pass
else:
print("Token {} activated and saved for later use.".format(token))
except Exception as e:
print("Error writing token to cache: {}".format(str(e)))
elif not savedtoken and not token:
if text == "no" or text == False:
pass
else:
print("No authentication tokens found: usage will be limited.")
print("See www.quandl.com/api for more information.")
elif savedtoken and not token:
token = savedtoken
if text == "no" or text == False:
pass
else:
print("Using cached token {} for authentication.".format(token))
return token
# In lieu of urllib's urlencode, as this handles None values by ignoring them.
def _append_query_fields(url, **kwargs):
field_values = ['{0}={1}'.format(key, val)
for key, val in kwargs.items() if val]
return url + 'request_source=python&request_version='+ VERSION + '&' +'&'.join(field_values)
# Setup custom Exceptions
class WrongFormat(Exception):
"""Exception for dataset formatting errors"""
pass
class MultisetLimit(Exception):
"""Exception for calls exceeding the multiset limit"""
pass
class ParsingError(Exception):
"""Exception for I/O parsing errors"""
pass
class CallLimitExceeded(Exception):
"""Exception for daily call limit being exceeded"""
pass
class DatasetNotFound(Exception):
"""Exception for the dataset not being found"""
pass
class ErrorDownloading(Exception):
"""Catch all exception for download errors"""
pass
class MissingToken(Exception):
"""Exception when API token needed but missing"""
pass
class DateNotRecognized(Exception):
"""Exception when a date is not recognized as such"""
pass
class CodeFormatError(Exception):
"""Exception when a Quandl code is not formatted properly"""
pass
class QuandlError(Exception):
""" Exception when something is wrong with Quandl"""
pass
class TokenError(Exception):
"""Exception when incorrect API token is given"""
pass
class UnknownError(Exception):
"""Exception when an unknown error is passed"""
pass | gpl-2.0 | -78,809,971,617,870,180 | 34.689977 | 217 | 0.61032 | false |
ebuendia/ProyectoPython | src/xml-parser.py | 1 | 2674 | import re
from device import Device
from group import Group
from capability import Capability
def startDevices(line):
    return re.match(r"<devices", line.strip()) is not None
def beginDevice(line):
    return re.match(r"<device", line.strip()) is not None
def endDevice(line):
    return re.match(r"</device>", line.strip()) is not None
def beginGroup(line):
    return re.match(r"<group", line.strip()) is not None
def endGroup(line):
    return re.match(r"</group>", line.strip()) is not None
def beginCapability(line):
    return re.match(r"<capability", line.strip()) is not None
def endDevices(line):
    return re.match(r"</devices>", line.strip()) is not None
def deleteTags(line, tag, etag):
return line.strip().replace(tag,"").replace(etag,"")
def getAttrId(line):
return line.rsplit(" ")[0].replace("id=","").replace('"',"")
def getAttrUser(line):
return line.rsplit(" ")[1].replace("user_agent=","").replace('"',"")
def getAttrFall(line):
return line.rsplit(" ")[2].replace("fall_back=","").replace('"',"")
def getAttrName(line):
return line.rsplit(" ")[0].replace("name=","").replace('"',"")
def getAttrValue(line):
return line.rsplit(" ")[1].replace("value=","").replace('"',"")
# Main function
def main():
file = open("test.xml","r")
line = file.readline()
while not startDevices(line):
line = file.readline()
line = file.readline().strip()
devices = []
device = ""
group = ""
capability = ""
while not endDevices(line):
if beginDevice(line):
line = deleteTags(line,"<device ",">")
att_id = getAttrId(line)
att_user = getAttrUser(line)
att_fall = getAttrFall(line)
device = Device(att_id, att_user, att_fall)
line = file.readline()
if endDevice(line):
devices.append(device)
line = file.readline()
if beginGroup(line):
line = deleteTags(line,"<group ",">")
att_id = getAttrId(line)
group = Group(att_id)
group.setDevice(device)
line = file.readline()
if endGroup(line):
device.addGroup(group)
line = file.readline()
if beginCapability(line):
line = deleteTags(line, "<capability ", "/>")
att_name = getAttrName(line)
att_value = getAttrValue(line)
capability = Capability(att_name, att_value)
capability.setGroup(group)
group.addCapability(capability)
line = file.readline()
print "Devices\n"
printDevices(devices)
print "End Devices\n"
file.close()
return 0
def printDevices(list):
for device in list:
print device
printGroups(device)
def printGroups(device):
for group in device.getGroups():
print group
printCapabilities(group)
def printCapabilities(group):
for capability in group.getCapabilities():
print capability
if __name__ == '__main__':
main()
| unlicense | -1,180,188,492,776,922,400 | 21.470588 | 69 | 0.666791 | false |
google-research/disentanglement_lib | disentanglement_lib/evaluation/metrics/factor_vae.py | 1 | 8683 | # coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the disentanglement metric from the FactorVAE paper.
Based on "Disentangling by Factorising" (https://arxiv.org/abs/1802.05983).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from disentanglement_lib.evaluation.metrics import utils
import numpy as np
from six.moves import range
import gin.tf
@gin.configurable(
"factor_vae_score",
blacklist=["ground_truth_data", "representation_function", "random_state",
"artifact_dir"])
def compute_factor_vae(ground_truth_data,
representation_function,
random_state,
artifact_dir=None,
batch_size=gin.REQUIRED,
num_train=gin.REQUIRED,
num_eval=gin.REQUIRED,
num_variance_estimate=gin.REQUIRED):
"""Computes the FactorVAE disentanglement metric.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
random_state: Numpy random state used for randomness.
artifact_dir: Optional path to directory where artifacts can be saved.
batch_size: Number of points to be used to compute the training_sample.
num_train: Number of points used for training.
num_eval: Number of points used for evaluation.
num_variance_estimate: Number of points used to estimate global variances.
Returns:
Dictionary with scores:
train_accuracy: Accuracy on training set.
eval_accuracy: Accuracy on evaluation set.
"""
del artifact_dir
logging.info("Computing global variances to standardise.")
global_variances = _compute_variances(ground_truth_data,
representation_function,
num_variance_estimate, random_state)
active_dims = _prune_dims(global_variances)
scores_dict = {}
if not active_dims.any():
scores_dict["train_accuracy"] = 0.
scores_dict["eval_accuracy"] = 0.
scores_dict["num_active_dims"] = 0
return scores_dict
logging.info("Generating training set.")
training_votes = _generate_training_batch(ground_truth_data,
representation_function, batch_size,
num_train, random_state,
global_variances, active_dims)
classifier = np.argmax(training_votes, axis=0)
other_index = np.arange(training_votes.shape[1])
logging.info("Evaluate training set accuracy.")
train_accuracy = np.sum(
training_votes[classifier, other_index]) * 1. / np.sum(training_votes)
logging.info("Training set accuracy: %.2g", train_accuracy)
logging.info("Generating evaluation set.")
eval_votes = _generate_training_batch(ground_truth_data,
representation_function, batch_size,
num_eval, random_state,
global_variances, active_dims)
logging.info("Evaluate evaluation set accuracy.")
eval_accuracy = np.sum(eval_votes[classifier,
other_index]) * 1. / np.sum(eval_votes)
logging.info("Evaluation set accuracy: %.2g", eval_accuracy)
scores_dict["train_accuracy"] = train_accuracy
scores_dict["eval_accuracy"] = eval_accuracy
scores_dict["num_active_dims"] = len(active_dims)
return scores_dict
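# Illustrative invocation sketch (not from the library docs; in practice the
# batch/sample sizes are bound through gin, and the values below are placeholders):
#
#   scores = compute_factor_vae(ground_truth_data, representation_function,
#                               np.random.RandomState(0), batch_size=64,
#                               num_train=10000, num_eval=5000,
#                               num_variance_estimate=10000)
#   # scores["train_accuracy"], scores["eval_accuracy"], scores["num_active_dims"]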
@gin.configurable("prune_dims", blacklist=["variances"])
def _prune_dims(variances, threshold=0.):
"""Mask for dimensions collapsed to the prior."""
scale_z = np.sqrt(variances)
return scale_z >= threshold
def _compute_variances(ground_truth_data,
representation_function,
batch_size,
random_state,
eval_batch_size=64):
"""Computes the variance for each dimension of the representation.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observation as input and
outputs a representation.
batch_size: Number of points to be used to compute the variances.
random_state: Numpy random state used for randomness.
eval_batch_size: Batch size used to eval representation.
Returns:
Vector with the variance of each dimension.
"""
observations = ground_truth_data.sample_observations(batch_size, random_state)
representations = utils.obtain_representation(observations,
representation_function,
eval_batch_size)
representations = np.transpose(representations)
assert representations.shape[0] == batch_size
return np.var(representations, axis=0, ddof=1)
def _generate_training_sample(ground_truth_data, representation_function,
batch_size, random_state, global_variances,
active_dims):
"""Sample a single training sample based on a mini-batch of ground-truth data.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observation as input and
outputs a representation.
batch_size: Number of points to be used to compute the training_sample.
random_state: Numpy random state used for randomness.
global_variances: Numpy vector with variances for all dimensions of
representation.
active_dims: Indexes of active dimensions.
Returns:
factor_index: Index of factor coordinate to be used.
argmin: Index of representation coordinate with the least variance.
"""
# Select random coordinate to keep fixed.
factor_index = random_state.randint(ground_truth_data.num_factors)
# Sample two mini batches of latent variables.
factors = ground_truth_data.sample_factors(batch_size, random_state)
# Fix the selected factor across mini-batch.
factors[:, factor_index] = factors[0, factor_index]
# Obtain the observations.
observations = ground_truth_data.sample_observations_from_factors(
factors, random_state)
representations = representation_function(observations)
local_variances = np.var(representations, axis=0, ddof=1)
argmin = np.argmin(local_variances[active_dims] /
global_variances[active_dims])
return factor_index, argmin
def _generate_training_batch(ground_truth_data, representation_function,
batch_size, num_points, random_state,
global_variances, active_dims):
"""Sample a set of training samples based on a batch of ground-truth data.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
batch_size: Number of points to be used to compute the training_sample.
num_points: Number of points to be sampled for training set.
random_state: Numpy random state used for randomness.
global_variances: Numpy vector with variances for all dimensions of
representation.
active_dims: Indexes of active dimensions.
Returns:
(num_factors, dim_representation)-sized numpy array with votes.
"""
votes = np.zeros((ground_truth_data.num_factors, global_variances.shape[0]),
dtype=np.int64)
for _ in range(num_points):
factor_index, argmin = _generate_training_sample(ground_truth_data,
representation_function,
batch_size, random_state,
global_variances,
active_dims)
votes[factor_index, argmin] += 1
return votes
| apache-2.0 | -9,107,700,491,507,206,000 | 42.415 | 80 | 0.657031 | false |
uclouvain/OSIS-Louvain | base/tests/business/education_groups/test_delete.py | 1 | 8462 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import TestCase
from base.business.education_groups import delete
from base.models.education_group_year import EducationGroupYear
from base.models.enums.education_group_types import GroupType
from base.models.group_element_year import GroupElementYear
from base.tests.factories.academic_year import AcademicYearFactory
from base.tests.factories.authorized_relationship import AuthorizedRelationshipFactory
from base.tests.factories.education_group_year import TrainingFactory, GroupFactory
from base.tests.factories.group_element_year import GroupElementYearFactory
class TestHaveContents(TestCase):
@classmethod
def setUpTestData(cls):
cls.academic_year = AcademicYearFactory(year=2019)
def test_have_contents_case_no_contents(self):
education_group_year = TrainingFactory(academic_year=self.academic_year)
self.assertFalse(delete._have_contents_which_are_not_mandatory(education_group_year))
def test_have_contents_case_no_contents_which_because_mandatory_structure(self):
"""
        In this test, we ensure that all of its children are mandatory groups and they are empty.
        It must be considered as empty.
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
for education_group_type in [GroupType.COMMON_CORE.name, GroupType.FINALITY_120_LIST_CHOICE.name]:
child = GroupFactory(academic_year=self.academic_year, education_group_type__name=education_group_type)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child.education_group_type,
min_count_authorized=1,
)
GroupElementYearFactory(parent=education_group_year, child_branch=child)
self.assertFalse(delete._have_contents_which_are_not_mandatory(education_group_year))
def test_have_contents_case_have_contents_because_mandatory_structure_is_present_multiple_times(self):
"""
In this test, we ensure that we have two elements of one type which are mandatory in the basic structure.
        ==> We must consider that it has contents
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
subgroup_1 = GroupFactory(academic_year=self.academic_year, education_group_type__name=GroupType.SUB_GROUP.name)
GroupElementYearFactory(parent=education_group_year, child_branch=subgroup_1)
subgroup_2 = GroupFactory(
academic_year=self.academic_year,
education_group_type=subgroup_1.education_group_type,
)
GroupElementYearFactory(parent=education_group_year, child_branch=subgroup_2)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=subgroup_1.education_group_type,
min_count_authorized=1,
)
self.assertTrue(delete._have_contents_which_are_not_mandatory(education_group_year))
def test_have_contents_case_contents_because_structure_have_child_which_are_not_mandatory(self):
"""
        In this test, we ensure that at least one child is not a mandatory group, so the parent must not be
        considered as empty
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
child_mandatory = GroupFactory(academic_year=self.academic_year)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=1
)
GroupElementYearFactory(parent=education_group_year, child_branch=child_mandatory)
child_no_mandatory = GroupFactory(academic_year=self.academic_year)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=0
)
GroupElementYearFactory(parent=education_group_year, child_branch=child_no_mandatory)
self.assertTrue(delete._have_contents_which_are_not_mandatory(education_group_year))
class TestRunDelete(TestCase):
@classmethod
def setUpTestData(cls):
cls.academic_year = AcademicYearFactory(year=2019)
def test_delete_case_no_mandatory_structure(self):
education_group_year = TrainingFactory(academic_year=self.academic_year)
delete.start(education_group_year)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=education_group_year.pk)
def test_delete_case_remove_mandatory_structure(self):
education_group_year = TrainingFactory(academic_year=self.academic_year)
child_mandatory = GroupFactory(
academic_year=self.academic_year,
education_group_type__name=GroupType.COMMON_CORE.name
)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=1,
)
link_parent_child = GroupElementYearFactory(parent=education_group_year, child_branch=child_mandatory)
delete.start(education_group_year)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=education_group_year.pk)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=child_mandatory.pk)
with self.assertRaises(GroupElementYear.DoesNotExist):
GroupElementYear.objects.get(pk=link_parent_child.pk)
def test_delete_case_remove_mandatory_structure_case_reused_item_which_are_mandatory(self):
"""
        In this test, we ensure that the mandatory element is not removed if it is reused in another structure
"""
education_group_year = TrainingFactory(academic_year=self.academic_year)
child_mandatory = GroupFactory(
academic_year=self.academic_year,
education_group_type__name=GroupType.COMMON_CORE.name
)
AuthorizedRelationshipFactory(
parent_type=education_group_year.education_group_type,
child_type=child_mandatory.education_group_type,
min_count_authorized=1,
)
link_parent_child = GroupElementYearFactory(parent=education_group_year, child_branch=child_mandatory)
# Create another training
another_training = TrainingFactory(academic_year=self.academic_year)
GroupElementYearFactory(parent=another_training, child_branch=child_mandatory)
delete.start(education_group_year)
with self.assertRaises(EducationGroupYear.DoesNotExist):
EducationGroupYear.objects.get(pk=education_group_year.pk)
with self.assertRaises(GroupElementYear.DoesNotExist):
GroupElementYear.objects.get(pk=link_parent_child.pk)
self.assertEqual(
child_mandatory,
EducationGroupYear.objects.get(pk=child_mandatory.pk)
)
| agpl-3.0 | 1,811,763,372,489,224,200 | 47.348571 | 120 | 0.699326 | false |
dipapaspyros/bdo_platform | aggregator/migrations/0015_auto_20180915_2057.py | 2 | 1556 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-09-15 17:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('aggregator', '0014_auto_20180913_1531'),
]
operations = [
migrations.CreateModel(
name='DatasetAccess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start', models.DateField()),
('end', models.DateField()),
('valid', models.BooleanField()),
],
),
migrations.RemoveField(
model_name='dataset',
name='dataset_user',
),
migrations.AddField(
model_name='datasetaccess',
name='dataset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='aggregator.Dataset'),
),
migrations.AddField(
model_name='datasetaccess',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='dataset',
name='access_list',
field=models.ManyToManyField(through='aggregator.DatasetAccess', to=settings.AUTH_USER_MODEL),
),
]
| mit | 1,340,202,843,143,090,000 | 32.826087 | 114 | 0.587404 | false |
eirannejad/pyRevit | extensions/pyRevitTools.extension/pyRevit.tab/Modify.panel/edit3.stack/Groups.pulldown/Show Nested Group Structure.pushbutton/script.py | 1 | 2660 | # -*- coding: utf-8 -*-
"""List the nested group structure around the selected group or element."""
from pyrevit import revit, DB
from pyrevit import script
output = script.get_output()
selection = revit.get_selection()
class GroupNode:
def __init__(self, group_element, par=None):
self.group = group_element
self.subgroups = self.find_subgroups()
@property
def name(self):
return self.group.Name
@property
def id(self):
return self.group.Id
@property
def members(self):
return [revit.doc.GetElement(x) for x in self.group.GetMemberIds()]
def find_subgroups(self):
subgrps = []
for mem in self.members:
if isinstance(mem, DB.Group):
subgrps.append(GroupNode(mem))
return subgrps
def __len__(self):
return len(self.subgroups)
def __iter__(self):
        return iter(self.subgroups)  # __iter__ must return an iterator, not a list
def __repr__(self):
return '<{} name:{}>'.format(self.__class__.__name__, self.name)
def print_tree(groupnode, level, trunk='', branch=''):
"""recursive method for printing (nested) group structure"""
inset = '\t'
fruit = \
branch + '■ {name} {id}'\
.format(name=groupnode.name, id=output.linkify(groupnode.id))
if groupnode.id in selection.element_ids:
print(fruit + '\t<<< selected group element')
elif any([x in selection.element_ids
for x in [y.Id for y in groupnode.members
if not isinstance(y, DB.Group)]]):
print(fruit + '\t<<< selected group members')
else:
print(fruit)
count = len(groupnode)
for idx, sub_grp in enumerate(groupnode):
last = idx == count - 1
if last:
sub_grp_trunk = trunk + inset + ' '
sub_grp_branch = trunk + inset + '└──'
else:
sub_grp_trunk = trunk + inset + '│'
sub_grp_branch = trunk + inset + '├──'
print_tree(sub_grp, level + 1, sub_grp_trunk, sub_grp_branch)
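# For reference, the printed structure looks roughly like this (hypothetical names):
#   ■ Parent Group <link>
#       ├──■ Nested Group A <link>
#       └──■ Nested Group B <link>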
# inspect the selection and find first parents
parent_groups = []
if not selection.is_empty:
for element in selection.elements:
if hasattr(element, 'GroupId'):
firstparent = element
while firstparent.GroupId != DB.ElementId.InvalidElementId:
firstparent = revit.doc.GetElement(firstparent.GroupId)
if isinstance(firstparent, DB.Group):
parent_groups.append(GroupNode(firstparent))
# print group structure for all discovered parent groups
for parent_grp in parent_groups:
print_tree(parent_grp, 0)
print('\n\n')
| gpl-3.0 | 4,766,360,654,464,250,000 | 27.728261 | 78 | 0.595535 | false |
imito/odin | odin/ml/scoring.py | 1 | 10299 | from __future__ import print_function, division, absolute_import
import numpy as np
from scipy.linalg import eigh, cholesky, inv, svd, solve
import tensorflow as tf
from sklearn.svm import SVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from odin.backend import length_norm, calc_white_mat
from odin.ml.base import BaseEstimator, TransformerMixin, Evaluable
# ===========================================================================
# Cosine Scoring
# ===========================================================================
def compute_class_avg(X, y, classes, sorting=True):
""" compute average vector for each class
Parameters
----------
X: [nb_samples, feat_dim]
y: [nb_samples]
classes: [nb_classes]
assumed numerical classes
sorting: bool
if True, sort the `classes` by numerical order (from small to large)
Return
------
[nb_classes, feat_dim]
Note
----
The given order of each class in `classes` will determine
the row order of returned matrix
"""
if sorting:
classes = sorted(classes, reverse=False)
return np.concatenate([np.mean(X[y == i], axis=0, keepdims=True)
for i in classes],
axis=0)
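# Tiny worked example (illustrative only):
#   X = np.array([[0., 0.], [2., 2.], [4., 4.]]); y = np.array([0, 1, 1])
#   compute_class_avg(X, y, classes=[0, 1])  # -> [[0., 0.], [3., 3.]]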
def compute_within_cov(X, y, classes=None, class_avg=None):
""" Compute the within-classes covariance matrix
Parameters
----------
X : [nb_samples, feat_dim]
y : [nb_samples]
classes : [nb_classes]
assumed numerical classes
class_avg : [nb_classes, feat_dim]
concatenated average vector of each class
Return
------
[feat_dim, feat_dim]
Note
----
The given order of each class in `classes` will determine
the row order of returned matrix
"""
if classes is None and class_avg is None:
raise ValueError("`classes` and `class_avg` cannot be None together")
if classes is not None:
class_avg = compute_class_avg(X, y, classes, sorting=True)
X_mu = X - class_avg[y]
Sw = np.cov(X_mu.T)
return Sw
def compute_wccn(X, y, classes=None, class_avg=None):
""" Within class covariance normalization
Parameters
----------
X : [nb_samples, feat_dim]
y : [nb_samples]
classes : [nb_classes]
assumed numerical classes
class_avg : [nb_classes, feat_dim]
concatenated average vector of each class
Return
------
w: [feat_dim, feat_dim]
where X_norm = dot(X, w)
"""
if classes is None and class_avg is None:
raise ValueError("`classes` and `class_avg` cannot be None together")
Sw = compute_within_cov(X, y, classes, class_avg)
Sw = Sw + 1e-6 * np.eye(Sw.shape[0])
return calc_white_mat(Sw)
class VectorNormalizer(BaseEstimator, TransformerMixin):
""" Perform of sequence of normalization as following
-> Centering: Substract sample mean
-> Whitening: using within-class-covariance-normalization
-> Applying LDA (optional)
-> Length normalization
Parameters
----------
centering : bool (default: True)
    mean-normalize the vectors
  wccn : bool (default: False)
    within class covariance normalization
  lda : bool (default: False)
    Linear Discriminant Analysis
concat : bool (default: False)
concatenate original vector to the normalized vector
Return
------
[nb_samples, feat_dim] if `lda=False`
[nb_samples, nb_classes - 1] if `lda=True` and `concat=False`
[nb_samples, feat_dim + nb_classes - 1] if `lda=True` and `concat=True`
"""
def __init__(self, centering=True, wccn=False, unit_length=True,
lda=False, concat=False):
super(VectorNormalizer, self).__init__()
self._centering = bool(centering)
self._unit_length = bool(unit_length)
self._wccn = bool(wccn)
self._lda = LinearDiscriminantAnalysis() if bool(lda) else None
self._feat_dim = None
self._concat = bool(concat)
# ==================== properties ==================== #
@property
def feat_dim(self):
return self._feat_dim
@property
def is_initialized(self):
return self._feat_dim is not None
@property
def is_fitted(self):
return hasattr(self, '_W')
@property
def enroll_vecs(self):
return self._enroll_vecs
@property
def mean(self):
""" global mean vector """
return self._mean
@property
def vmin(self):
return self._vmin
@property
def vmax(self):
return self._vmax
@property
def W(self):
return self._W
@property
def lda(self):
return self._lda
# ==================== sklearn ==================== #
def _initialize(self, X, y):
if not self.is_initialized:
self._feat_dim = X.shape[1]
assert self._feat_dim == X.shape[1]
if isinstance(y, (tuple, list)):
y = np.asarray(y)
if y.ndim == 2:
y = np.argmax(y, axis=-1)
return y, np.unique(y)
def normalize(self, X, concat=None):
"""
Parameters
----------
X : array [nb_samples, feat_dim]
concat : {None, True, False}
if not None, override the default `concat` attribute of
this `VectorNormalizer`
"""
if not self.is_fitted:
raise RuntimeError("VectorNormalizer has not been fitted.")
if concat is None:
concat = self._concat
if concat:
X_org = X[:] if not isinstance(X, np.ndarray) else X
else:
X_org = None
# ====== normalizing ====== #
if self._centering:
X = X - self._mean
if self._wccn:
X = np.dot(X, self.W)
# ====== LDA ====== #
if self._lda is not None:
X_lda = self._lda.transform(X) # [nb_classes, nb_classes - 1]
# concat if necessary
if concat:
X = np.concatenate((X_lda, X_org), axis=-1)
else:
X = X_lda
# ====== unit length normalization ====== #
if self._unit_length:
X = length_norm(X, axis=-1, ord=2)
return X
def fit(self, X, y):
y, classes = self._initialize(X, y)
# ====== compute classes' average ====== #
enroll = compute_class_avg(X, y, classes, sorting=True)
M = X.mean(axis=0).reshape(1, -1)
self._mean = M
if self._centering:
X = X - M
# ====== WCCN ====== #
if self._wccn:
W = compute_wccn(X, y, classes=None, class_avg=enroll) # [feat_dim, feat_dim]
else:
W = 1
self._W = W
# ====== preprocess ====== #
# whitening the data
if self._wccn:
X = np.dot(X, W)
# length normalization
if self._unit_length:
X = length_norm(X, axis=-1)
# linear discriminant analysis
if self._lda is not None:
self._lda.fit(X, y) # [nb_classes, nb_classes - 1]
# ====== enroll vecs ====== #
self._enroll_vecs = self.normalize(enroll, concat=False)
# ====== max min ====== #
if self._lda is not None:
X = self._lda.transform(X)
X = length_norm(X, axis=-1, ord=2)
vmin = X.min(0, keepdims=True)
vmax = X.max(0, keepdims=True)
self._vmin, self._vmax = vmin, vmax
return self
def transform(self, X):
return self.normalize(X)
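# Illustrative usage sketch (not from the original docs; X_* are [n_samples, feat_dim]
# vectors and y_train holds integer class labels):
#
#   normalizer = VectorNormalizer(centering=True, wccn=True, lda=True)
#   normalizer.fit(X_train, y_train)
#   X_norm = normalizer.transform(X_test)  # whitened, LDA-projected, length-normalized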
class Scorer(BaseEstimator, TransformerMixin, Evaluable):
""" Scorer
Parameters
----------
centering : bool (default: True)
    mean-normalize the vectors
wccn : bool (default: True)
within class covariance normalization
lda : bool (default: True)
Linear Discriminant Analysis
concat : bool (default: False)
concatenate original vector to the normalized vector
method : {'cosine', 'svm'}
method for scoring
"""
def __init__(self, centering=True, wccn=True, lda=True, concat=False,
method='cosine', labels=None):
super(Scorer, self).__init__()
self._normalizer = VectorNormalizer(
centering=centering, wccn=wccn, lda=lda, concat=concat)
self._labels = labels
method = str(method).lower()
if method not in ('cosine', 'svm'):
raise ValueError('`method` must be one of the following: cosine, svm; '
'but given: "%s"' % method)
self._method = method
# ==================== properties ==================== #
@property
def method(self):
return self._method
@property
def feat_dim(self):
return self._normalizer.feat_dim
@property
def labels(self):
return self._labels
@property
def nb_classes(self):
return len(self._labels)
@property
def is_initialized(self):
return self._normalizer.is_initialized
@property
def is_fitted(self):
return self._normalizer.is_fitted
@property
def normalizer(self):
return self._normalizer
@property
def lda(self):
return self._normalizer.lda
# ==================== sklearn ==================== #
def fit(self, X, y):
# ====== preprocessing ====== #
if isinstance(X, (tuple, list)):
X = np.asarray(X)
if isinstance(y, (tuple, list)):
y = np.asarray(y)
# ====== vector normalizer ====== #
self._normalizer.fit(X, y)
if self._labels is None:
if y.ndim >= 2:
y = np.argmax(y, axis=-1)
self._labels = np.unique(y)
# ====== for SVM method ====== #
if self.method == 'svm':
X = self._normalizer.transform(X)
# normalize to [0, 1]
X = 2 * (X - self._normalizer.vmin) /\
(self._normalizer.vmax - self._normalizer.vmin) - 1
self._svm = SVC(C=1, kernel='rbf', gamma='auto', coef0=1,
shrinking=True, random_state=0,
probability=True, tol=1e-3,
cache_size=1e4, class_weight='balanced')
self._svm.fit(X, y)
self.predict_proba = self._predict_proba
return self
def _predict_proba(self, X):
if self.method != 'svm':
raise RuntimeError("`predict_proba` only for 'svm' method")
return self._svm.predict_proba(self._normalizer.transform(X))
def predict_log_proba(self, X):
return self.transform(X)
def transform(self, X):
# [nb_samples, nb_classes - 1] (if LDA applied)
X = self._normalizer.transform(X)
# ====== cosine scoring ====== #
if self.method == 'cosine':
# [nb_classes, nb_classes - 1]
model_ivectors = self._normalizer.enroll_vecs
test_ivectors = X
scores = np.dot(test_ivectors, model_ivectors.T)
# ====== svm ====== #
elif self.method == 'svm':
X = 2 * (X - self._normalizer.vmin) /\
(self._normalizer.vmax - self._normalizer.vmin) - 1
scores = self._svm.predict_log_proba(X)
return scores
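# Illustrative usage sketch (not from the original docs):
#
#   scorer = Scorer(wccn=True, lda=True, method='cosine')
#   scorer.fit(X_train, y_train)
#   scores = scorer.transform(X_test)  # one score per (test sample, enrolled class)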
| mit | -2,396,844,634,403,191,000 | 27.216438 | 83 | 0.592873 | false |
yephper/django | django/contrib/auth/views.py | 1 | 12821 | import functools
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
def deprecate_current_app(func):
"""
Handle deprecation of the current_app parameter of the views.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
if 'current_app' in kwargs:
warnings.warn(
"Passing `current_app` as a keyword argument is deprecated. "
"Instead the caller of `{0}` should set "
"`request.current_app`.".format(func.__name__),
RemovedInDjango20Warning
)
current_app = kwargs.pop('current_app')
request = kwargs.get('request', None)
if request and current_app is not None:
request.current_app = current_app
return func(*args, **kwargs)
return inner
def _get_login_redirect_url(request, redirect_to):
# Ensure the user-originating redirection URL is safe.
if not is_safe_url(url=redirect_to, host=request.get_host()):
return resolve_url(settings.LOGIN_REDIRECT_URL)
return redirect_to
@deprecate_current_app
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
redirect_field_name=REDIRECT_FIELD_NAME,
authentication_form=AuthenticationForm,
extra_context=None, redirect_authenticated_user=False):
"""
Displays the login form and handles the login action.
"""
redirect_to = request.POST.get(redirect_field_name, request.GET.get(redirect_field_name, ''))
if redirect_authenticated_user and request.user.is_authenticated():
redirect_to = _get_login_redirect_url(request, redirect_to)
if redirect_to == request.path:
raise ValueError(
"Redirection loop for authenticated user detected. Check that "
"your LOGIN_REDIRECT_URL doesn't point to a login page."
)
return HttpResponseRedirect(redirect_to)
elif request.method == "POST":
form = authentication_form(request, data=request.POST)
if form.is_valid():
auth_login(request, form.get_user())
return HttpResponseRedirect(_get_login_redirect_url(request, redirect_to))
else:
form = authentication_form(request)
current_site = get_current_site(request)
context = {
'form': form,
redirect_field_name: redirect_to,
'site': current_site,
'site_name': current_site.name,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
@never_cache
def logout(request, next_page=None,
template_name='registration/logged_out.html',
redirect_field_name=REDIRECT_FIELD_NAME,
extra_context=None):
"""
Logs out the user and displays 'You are logged out' message.
"""
auth_logout(request)
if next_page is not None:
next_page = resolve_url(next_page)
elif settings.LOGOUT_REDIRECT_URL:
next_page = resolve_url(settings.LOGOUT_REDIRECT_URL)
if (redirect_field_name in request.POST or
redirect_field_name in request.GET):
next_page = request.POST.get(redirect_field_name,
request.GET.get(redirect_field_name))
# Security check -- don't allow redirection to a different host.
if not is_safe_url(url=next_page, host=request.get_host()):
next_page = request.path
if next_page:
# Redirect to this page until the session has been cleared.
return HttpResponseRedirect(next_page)
current_site = get_current_site(request)
context = {
'site': current_site,
'site_name': current_site.name,
'title': _('Logged out')
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def logout_then_login(request, login_url=None, extra_context=None):
"""
Logs out the user if they are logged in. Then redirects to the log-in page.
"""
if not login_url:
login_url = settings.LOGIN_URL
login_url = resolve_url(login_url)
return logout(request, login_url, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
redirect_field_name=REDIRECT_FIELD_NAME):
"""
Redirects the user to the login page, passing the given 'next' page
"""
resolved_url = resolve_url(login_url or settings.LOGIN_URL)
login_url_parts = list(urlparse(resolved_url))
if redirect_field_name:
querystring = QueryDict(login_url_parts[4], mutable=True)
querystring[redirect_field_name] = next
login_url_parts[4] = querystring.urlencode(safe='/')
return HttpResponseRedirect(urlunparse(login_url_parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
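# Illustrative wiring (not part of this module): the reverse() calls in the views
# below assume URL patterns named like the ones django.contrib.auth.urls provides:
#
#   url(r'^password_reset/$', views.password_reset, name='password_reset'),
#   url(r'^password_reset/done/$', views.password_reset_done, name='password_reset_done'),
#   url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
#       views.password_reset_confirm, name='password_reset_confirm'),
#   url(r'^reset/done/$', views.password_reset_complete, name='password_reset_complete'),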
@deprecate_current_app
@csrf_protect
def password_reset(request,
template_name='registration/password_reset_form.html',
email_template_name='registration/password_reset_email.html',
subject_template_name='registration/password_reset_subject.txt',
password_reset_form=PasswordResetForm,
token_generator=default_token_generator,
post_reset_redirect=None,
from_email=None,
extra_context=None,
html_email_template_name=None,
extra_email_context=None):
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_done')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
if request.method == "POST":
form = password_reset_form(request.POST)
if form.is_valid():
opts = {
'use_https': request.is_secure(),
'token_generator': token_generator,
'from_email': from_email,
'email_template_name': email_template_name,
'subject_template_name': subject_template_name,
'request': request,
'html_email_template_name': html_email_template_name,
'extra_email_context': extra_email_context,
}
form.save(**opts)
return HttpResponseRedirect(post_reset_redirect)
else:
form = password_reset_form()
context = {
'form': form,
'title': _('Password reset'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_done(request,
template_name='registration/password_reset_done.html',
extra_context=None):
context = {
'title': _('Password reset sent'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
@deprecate_current_app
def password_reset_confirm(request, uidb64=None, token=None,
template_name='registration/password_reset_confirm.html',
token_generator=default_token_generator,
set_password_form=SetPasswordForm,
post_reset_redirect=None,
extra_context=None):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
UserModel = get_user_model()
assert uidb64 is not None and token is not None # checked by URLconf
if post_reset_redirect is None:
post_reset_redirect = reverse('password_reset_complete')
else:
post_reset_redirect = resolve_url(post_reset_redirect)
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
title = _('Enter new password')
if request.method == 'POST':
form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(post_reset_redirect)
else:
form = set_password_form(user)
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_complete(request,
template_name='registration/password_reset_complete.html',
extra_context=None):
context = {
'login_url': resolve_url(settings.LOGIN_URL),
'title': _('Password reset complete'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@sensitive_post_parameters()
@csrf_protect
@login_required
@deprecate_current_app
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=PasswordChangeForm,
extra_context=None):
if post_change_redirect is None:
post_change_redirect = reverse('password_change_done')
else:
post_change_redirect = resolve_url(post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one.
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
@login_required
@deprecate_current_app
def password_change_done(request,
template_name='registration/password_change_done.html',
extra_context=None):
context = {
'title': _('Password change successful'),
}
if extra_context is not None:
context.update(extra_context)
return TemplateResponse(request, template_name, context)
| bsd-3-clause | -5,530,483,616,621,971,000 | 35.488304 | 97 | 0.625614 | false |
rascul/botwot | plugins/urltitle.py | 1 | 1618 | """ Url Title Plugin (botwot plugins.urltitle) """
# Copyright 2014 Ray Schulz <https://rascul.io>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from bs4 import BeautifulSoup
import requests
from pyaib.plugins import observe, plugin_class
@plugin_class
class UrlTitle(object):
def __init__(self, context, config):
pass
@observe("IRC_MSG_PRIVMSG")
def observe_privmsg(self, context, msg):
""" Look up HTML titles for URLs """
m = re.match(r'(?P<url>https?://\S*)', msg.message)
if m:
# Grab the URL
url = m.groupdict().get("url")
# Make sure url has http:// or https://
if not url.startswith("http://") and not url.startswith("https://"):
url = "http://%s" % url
# Get the page and parse it for title and meta description
try:
page = requests.get(url)
except (requests.exceptions.ConnectionError, requests.exceptions.InvalidURL):
return
if page and page.status_code < 400:
soup = BeautifulSoup(page.text)
if soup and soup.title:
title = soup.title.string[:256]
if title:
msg.reply("%s: %s" % (msg.sender, title))
| apache-2.0 | -6,002,606,328,788,732,000 | 27.892857 | 80 | 0.687268 | false |
pgroudas/pants | src/python/pants/backend/jvm/tasks/jvm_compile/jvm_compile.py | 1 | 16630 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import sys
from abc import abstractmethod
from collections import defaultdict
from pants.backend.core.tasks.group_task import GroupMember
from pants.backend.jvm.tasks.jvm_compile.jvm_compile_global_strategy import JvmCompileGlobalStrategy
from pants.backend.jvm.tasks.jvm_compile.jvm_compile_isolated_strategy import \
JvmCompileIsolatedStrategy
from pants.backend.jvm.tasks.jvm_compile.jvm_dependency_analyzer import JvmDependencyAnalyzer
from pants.backend.jvm.tasks.jvm_compile.jvm_fingerprint_strategy import JvmFingerprintStrategy
from pants.backend.jvm.tasks.nailgun_task import NailgunTaskBase
from pants.goal.products import MultipleRootedProducts
from pants.option.options import Options
from pants.reporting.reporting_utils import items_to_report_element
class JvmCompile(NailgunTaskBase, GroupMember):
"""A common framework for JVM compilation.
To subclass for a specific JVM language, implement the static values and methods
mentioned below under "Subclasses must implement".
"""
@classmethod
def register_options(cls, register):
super(JvmCompile, cls).register_options(register)
register('--partition-size-hint', type=int, default=sys.maxint, metavar='<# source files>',
help='Roughly how many source files to attempt to compile together. Set to a large '
'number to compile all sources together. Set to 0 to compile target-by-target.')
register('--jvm-options', type=Options.list,
help='Run the compiler with these JVM options.')
register('--args', action='append', default=list(cls.get_args_default(register.bootstrap)),
help='Pass these args to the compiler.')
register('--confs', type=Options.list, default=['default'],
help='Compile for these Ivy confs.')
# TODO: Stale analysis should be automatically ignored via Task identities:
# https://github.com/pantsbuild/pants/issues/1351
register('--clear-invalid-analysis', default=False, action='store_true',
advanced=True,
help='When set, any invalid/incompatible analysis files will be deleted '
'automatically. When unset, an error is raised instead.')
register('--warnings', default=True, action='store_true',
help='Compile with all configured warnings enabled.')
register('--warning-args', action='append', default=list(cls.get_warning_args_default()),
advanced=True,
help='Extra compiler args to use when warnings are enabled.')
register('--no-warning-args', action='append', default=list(cls.get_no_warning_args_default()),
advanced=True,
help='Extra compiler args to use when warnings are disabled.')
register('--strategy', choices=['global', 'isolated'], default='global',
help='Selects the compilation strategy to use. The "global" strategy uses a shared '
'global classpath for all compiled classes, and the "isolated" strategy uses '
'per-target classpaths.')
JvmCompileGlobalStrategy.register_options(register, cls._language, cls._supports_concurrent_execution)
JvmCompileIsolatedStrategy.register_options(register, cls._language, cls._supports_concurrent_execution)
@classmethod
def product_types(cls):
return ['classes_by_target', 'classes_by_source', 'resources_by_target']
@classmethod
def prepare(cls, options, round_manager):
super(JvmCompile, cls).prepare(options, round_manager)
# This task uses JvmDependencyAnalyzer as a helper, get its product needs
JvmDependencyAnalyzer.prepare(options, round_manager)
round_manager.require_data('compile_classpath')
round_manager.require_data('ivy_resolve_symlink_map')
# Require codegen we care about
# TODO(John Sirois): roll this up in Task - if the list of labels we care about for a target
# predicate to filter the full build graph is exposed, the requirement can be made automatic
# and in turn codegen tasks could denote the labels they produce automating wiring of the
# produce side
round_manager.require_data('java')
round_manager.require_data('scala')
# Allow the deferred_sources_mapping to take place first
round_manager.require_data('deferred_sources')
# Subclasses must implement.
# --------------------------
_language = None
_file_suffix = None
_supports_concurrent_execution = None
@classmethod
def name(cls):
return cls._language
@classmethod
def get_args_default(cls, bootstrap_option_values):
"""Override to set default for --args option.
:param bootstrap_option_values: The values of the "bootstrap options" (e.g., pants_workdir).
Implementations can use these when generating the default.
See src/python/pants/options/options_bootstrapper.py for
details.
"""
return ()
@classmethod
def get_warning_args_default(cls):
"""Override to set default for --warning-args option."""
return ()
@classmethod
def get_no_warning_args_default(cls):
"""Override to set default for --no-warning-args option."""
return ()
@property
def config_section(self):
return self.options_scope
def select(self, target):
return target.has_sources(self._file_suffix)
def create_analysis_tools(self):
"""Returns an AnalysisTools implementation.
Subclasses must implement.
"""
raise NotImplementedError()
def compile(self, args, classpath, sources, classes_output_dir, upstream_analysis, analysis_file):
"""Invoke the compiler.
Must raise TaskError on compile failure.
Subclasses must implement."""
raise NotImplementedError()
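# Hypothetical subclass sketch (illustrative, not part of this file): a concrete
# language backend supplies the static values and the abstract methods above.
#
#   class ExampleCompile(JvmCompile):
#       _language = 'example'
#       _file_suffix = '.example'
#       _supports_concurrent_execution = False
#
#       def create_analysis_tools(self):
#           return ...   # language-specific AnalysisTools implementation
#
#       def compile(self, args, classpath, sources, classes_output_dir,
#                   upstream_analysis, analysis_file):
#           ...          # invoke the compiler; raise TaskError on failure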
# Subclasses may override.
# ------------------------
def extra_compile_time_classpath_elements(self):
"""Extra classpath elements common to all compiler invocations.
E.g., jars for compiler plugins.
These are added at the end of the classpath, after any dependencies, so that if they
overlap with any explicit dependencies, the compiler sees those first. This makes
missing dependency accounting much simpler.
"""
return []
def extra_products(self, target):
"""Any extra, out-of-band resources created for a target.
E.g., targets that produce scala compiler plugins or annotation processor files
produce an info file. The resources will be added to the compile_classpath, and
made available in resources_by_target.
Returns a list of pairs (root, [absolute paths of files under root]).
"""
return []
def __init__(self, *args, **kwargs):
super(JvmCompile, self).__init__(*args, **kwargs)
# JVM options for running the compiler.
self._jvm_options = self.get_options().jvm_options
self._args = list(self.get_options().args)
if self.get_options().warnings:
self._args.extend(self.get_options().warning_args)
else:
self._args.extend(self.get_options().no_warning_args)
self.setup_artifact_cache()
# The ivy confs for which we're building.
self._confs = self.get_options().confs
# The compile strategy to use for analysis and classfile placement.
if self.get_options().strategy == 'global':
strategy_constructor = JvmCompileGlobalStrategy
else:
assert self.get_options().strategy == 'isolated'
strategy_constructor = JvmCompileIsolatedStrategy
self._strategy = strategy_constructor(self.context,
self.get_options(),
self.workdir,
self.create_analysis_tools(),
self._language,
lambda s: s.endswith(self._file_suffix))
def _jvm_fingerprint_strategy(self):
# Use a fingerprint strategy that allows us to also include java/scala versions.
return JvmFingerprintStrategy(self._platform_version_info())
def _platform_version_info(self):
return [self._strategy.name()] + self._language_platform_version_info()
@abstractmethod
def _language_platform_version_info(self):
"""
Provides extra platform information such as java version that will be used
in the fingerprinter. This in turn ensures different platform versions create different
cache artifacts.
Subclasses must override this and return a list of version info.
"""
pass
def pre_execute(self):
# Only create these working dirs during execution phase, otherwise, they
# would be wiped out by clean-all goal/task if it's specified.
self._strategy.pre_compile()
# TODO(John Sirois): Ensuring requested product maps are available - if empty - should probably
# be lifted to Task infra.
# In case we have no relevant targets and return early create the requested product maps.
self._create_empty_products()
def prepare_execute(self, chunks):
targets_in_chunks = list(itertools.chain(*chunks))
# Invoke the strategy's prepare_compile to prune analysis.
cache_manager = self.create_cache_manager(invalidate_dependents=True,
fingerprint_strategy=self._jvm_fingerprint_strategy())
self._strategy.prepare_compile(cache_manager, self.context.targets(), targets_in_chunks)
def execute_chunk(self, relevant_targets):
if not relevant_targets:
return
# Invalidation check. Everything inside the with block must succeed for the
# invalid targets to become valid.
partition_size_hint, locally_changed_targets = self._strategy.invalidation_hints(relevant_targets)
with self.invalidated(relevant_targets,
invalidate_dependents=True,
partition_size_hint=partition_size_hint,
locally_changed_targets=locally_changed_targets,
fingerprint_strategy=self._jvm_fingerprint_strategy(),
topological_order=True) as invalidation_check:
if invalidation_check.invalid_vts:
# Find the invalid targets for this chunk.
invalid_targets = [vt.target for vt in invalidation_check.invalid_vts]
# Register products for all the valid targets.
# We register as we go, so dependency checking code can use this data.
valid_targets = list(set(relevant_targets) - set(invalid_targets))
valid_compile_contexts = [self._strategy.compile_context(t) for t in valid_targets]
self._register_vts(valid_compile_contexts)
# Invoke the strategy to execute compilations for invalid targets.
update_artifact_cache_vts_work = (self.get_update_artifact_cache_work
if self.artifact_cache_writes_enabled() else None)
self._strategy.compile_chunk(invalidation_check,
self.context.targets(),
relevant_targets,
invalid_targets,
self.extra_compile_time_classpath_elements(),
self._compile_vts,
self._register_vts,
update_artifact_cache_vts_work)
else:
# Nothing to build. Register products for all the targets in one go.
self._register_vts([self._strategy.compile_context(t) for t in relevant_targets])
def _compile_vts(self, vts, sources, analysis_file, upstream_analysis, classpath, outdir, progress_message):
"""Compiles sources for the given vts into the given output dir.
vts - versioned target set
sources - sources for this target set
analysis_file - the analysis file to manipulate
classpath - a list of classpath entries
outdir - the output dir to send classes to
May be invoked concurrently on independent target sets.
Postcondition: The individual targets in vts are up-to-date, as if each were
compiled individually.
"""
if not sources:
self.context.log.warn('Skipping {} compile for targets with no sources:\n {}'
.format(self.name(), vts.targets))
else:
# Do some reporting.
self.context.log.info(
'Compiling ',
items_to_report_element(sources, '{} source'.format(self.name())),
' in ',
items_to_report_element([t.address.reference() for t in vts.targets], 'target'),
' (',
progress_message,
').')
with self.context.new_workunit('compile'):
# The compiler may delete classfiles, then later exit on a compilation error. Then if the
# change triggering the error is reverted, we won't rebuild to restore the missing
# classfiles. So we force-invalidate here, to be on the safe side.
vts.force_invalidate()
self.compile(self._args, classpath, sources, outdir, upstream_analysis, analysis_file)
def check_artifact_cache(self, vts):
post_process_cached_vts = lambda vts: self._strategy.post_process_cached_vts(vts)
return self.do_check_artifact_cache(vts, post_process_cached_vts=post_process_cached_vts)
def _create_empty_products(self):
make_products = lambda: defaultdict(MultipleRootedProducts)
if self.context.products.is_required_data('classes_by_source'):
self.context.products.safe_create_data('classes_by_source', make_products)
# Whether or not anything else requires resources_by_target, this task
# uses it internally.
self.context.products.safe_create_data('resources_by_target', make_products)
# JvmDependencyAnalyzer uses classes_by_target within this run
self.context.products.safe_create_data('classes_by_target', make_products)
def _register_vts(self, compile_contexts):
classes_by_source = self.context.products.get_data('classes_by_source')
classes_by_target = self.context.products.get_data('classes_by_target')
compile_classpath = self.context.products.get_data('compile_classpath')
resources_by_target = self.context.products.get_data('resources_by_target')
# Register class products.
if classes_by_source is not None or classes_by_target is not None:
computed_classes_by_source_by_context = self._strategy.compute_classes_by_source(
compile_contexts)
resource_mapping = self._strategy.compute_resource_mapping(compile_contexts)
for compile_context in compile_contexts:
computed_classes_by_source = computed_classes_by_source_by_context[compile_context]
target = compile_context.target
classes_dir = compile_context.classes_dir
target_products = classes_by_target[target] if classes_by_target is not None else None
for source in compile_context.sources: # Sources are relative to buildroot.
classes = computed_classes_by_source.get(source, []) # Classes are absolute paths.
for cls in classes:
clsname = self._strategy.class_name_for_class_file(compile_context, cls)
resources = resource_mapping.get(clsname, [])
resources_by_target[target].add_abs_paths(classes_dir, resources)
if classes_by_target is not None:
target_products.add_abs_paths(classes_dir, classes)
if classes_by_source is not None:
classes_by_source[source].add_abs_paths(classes_dir, classes)
# Register resource products.
for compile_context in compile_contexts:
extra_resources = self.extra_products(compile_context.target)
# Add to resources_by_target (if it was requested).
if resources_by_target is not None:
target_resources = resources_by_target[compile_context.target]
for root, abs_paths in extra_resources:
target_resources.add_abs_paths(root, abs_paths)
# And to the compile_classpath, to make them available within the next round.
# TODO(stuhood): This is redundant with resources_by_target, but resources_by_target
# are not available during compilation. https://github.com/pantsbuild/pants/issues/206
entries = [(conf, root) for conf in self._confs for root, _ in extra_resources]
compile_classpath.add_for_target(compile_context.target, entries)
| apache-2.0 | 7,279,666,870,760,293,000 | 43.945946 | 110 | 0.676729 | false |
GoogleCloudPlatform/python-compat-runtime | appengine-compat/exported_appengine_sdk/google/appengine/tools/web_xml_parser.py | 1 | 8407 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Directly processes text of web.xml.
WebXmlParser is called with Xml string to produce a WebXml object containing
the data from that string.
WebXmlParser: Converts xml to AppEngineWebXml object.
WebXml: Contains relevant information from web.xml.
SecurityConstraint: Contains information about specified security constraints.
"""
import logging
from xml.etree import ElementTree
from google.appengine.tools import xml_parser_utils
from google.appengine.tools.app_engine_config_exception import AppEngineConfigException
from google.appengine.tools.value_mixin import ValueMixin
class WebXmlParser(object):
"""Provides logic for walking down XML tree and pulling data."""
def ProcessXml(self, xml_str, has_jsps=False):
"""Parses XML string and returns object representation of relevant info.
Uses ElementTree parser to return a tree representation of XML.
Then walks down that tree and extracts important info and adds it to the
object.
Args:
xml_str: The XML string itself.
has_jsps: True if the application has *.jsp files.
Returns:
If there is well-formed but illegal XML, returns a list of
errors. Otherwise, returns an AppEngineWebXml object containing
information from XML.
Raises:
AppEngineConfigException: In case of malformed XML or illegal inputs.
"""
try:
self.web_xml = WebXml()
self.web_xml.has_jsps = has_jsps
self.errors = []
xml_root = ElementTree.fromstring(xml_str)
for node in xml_root.getchildren():
self.ProcessSecondLevelNode(node)
if self.errors:
raise AppEngineConfigException('\n'.join(self.errors))
return self.web_xml
except ElementTree.ParseError:
raise AppEngineConfigException('Bad input -- not valid XML')
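# Minimal usage sketch (web_xml_str is assumed to hold the raw text of a web.xml):
#
#   web_xml = WebXmlParser().ProcessXml(web_xml_str, has_jsps=False)
#   web_xml.patterns        # url-patterns from servlet/filter mappings
#   web_xml.mime_mappings   # extension -> mime-type
#
# Malformed or illegal input raises AppEngineConfigException.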
_IGNORED_NODES = frozenset([
'context-param', 'description', 'display-name', 'distributable',
'ejb-local-ref', 'ejb-ref', 'env-entry', 'filter', 'icon',
'jsp-config', 'listener', 'locale-encoding-mapping-list',
'login-config', 'message-destination', 'message-destination-ref',
'persistence-context-ref', 'persistence-unit-ref', 'post-construct',
'pre-destroy', 'resource-env-ref', 'resource-ref', 'security-role',
'service-ref', 'servlet', 'session-config', 'taglib',
])
def ProcessSecondLevelNode(self, node):
element_name = xml_parser_utils.GetTag(node)
if element_name in self._IGNORED_NODES:
return
camel_case_name = ''.join(part.title() for part in element_name.split('-'))
method_name = 'Process%sNode' % camel_case_name
if (hasattr(self, method_name) and
method_name != 'ProcessSecondLevelNode'):
getattr(self, method_name)(node)
else:
logging.warning('Second-level tag not recognized: %s', element_name)
def ProcessServletMappingNode(self, node):
self._ProcessUrlMappingNode(node)
def ProcessFilterMappingNode(self, node):
self._ProcessUrlMappingNode(node)
def _ProcessUrlMappingNode(self, node):
"""Parses out URL and possible ID for filter-mapping and servlet-mapping.
Pulls url-pattern text out of node and adds to WebXml object. If url-pattern
has an id attribute, adds that as well. This is done for <servlet-mapping>
and <filter-mapping> nodes.
Args:
node: An ElementTreeNode which looks something like the following:
<servlet-mapping>
<servlet-name>redteam</servlet-name>
<url-pattern>/red/*</url-pattern>
</servlet-mapping>
"""
url_pattern_node = xml_parser_utils.GetChild(node, 'url-pattern')
if url_pattern_node is not None:
node_text = xml_parser_utils.GetNodeText(url_pattern_node)
self.web_xml.patterns.append(node_text)
id_attr = xml_parser_utils.GetAttribute(url_pattern_node, 'id')
if id_attr:
self.web_xml.pattern_to_id[node_text] = id_attr
def ProcessErrorPageNode(self, node):
"""Process error page specifications.
If one of the supplied error codes is 404, allow fall through to runtime.
Args:
node: An ElementTreeNode which looks something like the following.
<error-page>
<error-code>500</error-code>
<location>/errors/servererror.jsp</location>
</error-page>
"""
error_code = xml_parser_utils.GetChildNodeText(node, 'error-code')
if error_code == '404':
self.web_xml.fall_through_to_runtime = True
def ProcessWelcomeFileListNode(self, node):
for welcome_node in xml_parser_utils.GetNodes(node, 'welcome-file'):
welcome_file = xml_parser_utils.GetNodeText(welcome_node)
if welcome_file and welcome_file[0] == '/':
self.errors.append('Welcome files must be relative paths: %s' %
welcome_file)
continue
self.web_xml.welcome_files.append(welcome_file)
def ProcessMimeMappingNode(self, node):
extension = xml_parser_utils.GetChildNodeText(node, 'extension')
mime_type = xml_parser_utils.GetChildNodeText(node, 'mime-type')
if not extension:
self.errors.append('<mime-type> without extension')
return
self.web_xml.mime_mappings[extension] = mime_type
def ProcessSecurityConstraintNode(self, node):
"""Pulls data from the security constraint node and adds to WebXml object.
Args:
node: An ElementTree Xml node that looks something like the following:
<security-constraint>
<web-resource-collection>
<url-pattern>/profile/*</url-pattern>
</web-resource-collection>
<user-data-constraint>
<transport-guarantee>CONFIDENTIAL</transport-guarantee>
</user-data-constraint>
</security-constraint>
"""
security_constraint = SecurityConstraint()
resources_node = xml_parser_utils.GetChild(node, 'web-resource-collection')
security_constraint.patterns = [
xml_parser_utils.GetNodeText(sub_node)
for sub_node in xml_parser_utils.GetNodes(resources_node,
'url-pattern')]
constraint = xml_parser_utils.GetChild(node, 'auth-constraint')
if constraint is not None:
role_name = xml_parser_utils.GetChildNodeText(
constraint, 'role-name').lower()
if role_name:
if role_name not in ('none', '*', 'admin'):
self.errors.append('Bad value for <role-name> (%s), must be none, '
'*, or admin' % role_name)
security_constraint.required_role = role_name
user_constraint = xml_parser_utils.GetChild(node, 'user-data-constraint')
if user_constraint is not None:
guarantee = xml_parser_utils.GetChildNodeText(
user_constraint, 'transport-guarantee').lower()
if guarantee not in ('none', 'integral', 'confidential'):
self.errors.append('Bad value for <transport-guarantee> (%s), must be'
' none, integral, or confidential' % guarantee)
security_constraint.transport_guarantee = guarantee
self.web_xml.security_constraints.append(security_constraint)
class WebXml(ValueMixin):
"""Contains information about web.xml relevant for translation to app.yaml."""
def __init__(self):
self.patterns = []
self.security_constraints = []
self.welcome_files = []
self.mime_mappings = {}
self.pattern_to_id = {}
self.fall_through_to_runtime = False
self.has_jsps = False
def GetMimeTypeForPath(self, path):
if '.' not in path:
return None
return self.mime_mappings.get(path.split('.')[-1], None)
class SecurityConstraint(ValueMixin):
"""Contains information about security constraints in web.xml."""
def __init__(self):
self.patterns = []
self.transport_guarantee = 'none'
self.required_role = 'none'
| apache-2.0 | 354,165,943,435,447,300 | 34.92735 | 87 | 0.679434 | false |
skinkie/SleekXMPP--XEP-0080- | sleekxmpp/xmlstream/filesocket.py | 1 | 1213 | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from socket import _fileobject
import socket
class FileSocket(_fileobject):
"""
Create a file object wrapper for a socket to work around
issues present in Python 2.6 when using sockets as file objects.
The parser for xml.etree.cElementTree requires a file, but we will
be reading from the XMPP connection socket instead.
"""
def read(self, size=4096):
"""Read data from the socket as if it were a file."""
data = self._sock.recv(size)
if data is not None:
return data
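# Illustrative note (assumption, not taken from this file): the wrapper lets the
# incremental XML parser read the XMPP socket as if it were a file, e.g.
#
#   from xml.etree import cElementTree
#   for event, elem in cElementTree.iterparse(FileSocket(sock), events=('end',)):
#       ...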
class Socket26(socket._socketobject):
"""
A custom socket implementation that uses our own FileSocket class
to work around issues in Python 2.6 when using sockets as files.
"""
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
return FileSocket(self._sock, mode, bufsize)
| mit | 6,556,671,338,584,989,000 | 28.585366 | 75 | 0.66859 | false |
azumimuo/family-xbmc-addon | plugin.video.showboxarize/resources/lib/sources_de/1kino.py | 1 | 4950 | # -*- coding: utf-8 -*-
"""
Flixnet Add-on
Copyright (C) 2016 Viper2k4
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
import json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['1kino.in', 'streamkiste.tv']
self.base_link = 'http://1kino.in'
self.search_link = '/include/live.php?keyword=%s&nonce=%s'
self.search_js = '/js/live-search.js'
def movie(self, imdb, title, localtitle, year):
try:
url = self.__search(title, year)
if not url and title != localtitle: url = self.__search(localtitle, year)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
url = urlparse.urljoin(self.base_link, url)
r = client.request(url)
pid = re.findall('[e|t]\s*=\s*"(\w+)"\s*,', r)[0]
r = dom_parser.parse_dom(r, 'div', attrs={'id': 'stream-container'})[0].content
r = re.compile('<div id="stream-h">.*?</li>.*?</div>\s*</div>', re.IGNORECASE | re.DOTALL).findall(r)
r = [(dom_parser.parse_dom(i, 'div', attrs={'id': 'mirror-head'}), dom_parser.parse_dom(i, 'div', attrs={'id': 'stream-links'})) for i in r]
r = [(i[0][0].content, i[1]) for i in r if i[0]]
r = [(re.findall('.+\s[\||-]\s(.*)', i[0]), i[1]) for i in r]
r = [(i[0][0].strip(), i[1]) for i in r if len(i[0]) > 0]
for name, links in r:
quality, info = source_utils.get_release_quality(name)
links = [dom_parser.parse_dom(i.content, 'a', req=['href', 'title', 'data-mirror', 'data-host']) for i in links]
links = [([i[0].attrs.get('data-mirror'), i[0].attrs.get('data-host'), pid, url], i[0].content) for i in links]
info = ' | '.join(info)
for link, hoster in links:
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
link = urllib.urlencode({'mirror': link[0], 'host': link[1], 'pid': link[2], 'ceck': 'sk'})
sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': link, 'info': info, 'direct': False, 'debridonly': False, 'checkquality': True})
return sources
except:
return sources
def resolve(self, url):
try:
r = client.request(urlparse.urljoin(self.base_link, '/include/load.php'), post=url, XHR=True)
r = r.replace('\r', '').replace('\n', '')
links = [i.attrs['href'] for i in dom_parser.parse_dom(r, 'a', req='href') if i]
ifrms = [i.attrs['src'].strip() for i in dom_parser.parse_dom(r, 'iframe', req='src') if i]
links += ifrms
for link in links:
if not link.startswith('http'): link = urlparse.urljoin(self.base_link, link)
if self.base_link in link:
link = client.request(link, output='geturl')
if self.base_link not in link:
return link
except:
return
def __search(self, title, year):
try:
n = client.request(urlparse.urljoin(self.base_link, self.search_js))
try: n = re.findall('nonce=([0-9a-zA-Z]+)', n)[0]
except: n = '273e0f8ea3'
query = self.search_link % (urllib.quote_plus(cleantitle.query(title)), n)
query = urlparse.urljoin(self.base_link, query)
t = cleantitle.get(title)
y = ['%s' % str(year), '%s' % str(int(year) + 1), '%s' % str(int(year) - 1), '0']
r = client.request(query)
r = json.loads(r)
r = [(r[i].get('url'), r[i].get('title'), r[i].get('extra').get('date')) for i in r]
r = [i[0] for i in r if t == cleantitle.get(i[1]) and i[2] in y][0]
return source_utils.strip_domain(r)
except:
return | gpl-2.0 | -1,739,964,767,721,680,600 | 37.679688 | 179 | 0.552525 | false |
BlackVegetable/starcraft-oracle | sc2reader-master/sc2reader/constants.py | 1 | 3068 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals, division
# These are found in Repack-MPQ/fileset.{locale}#Mods#Core.SC2Mod#{locale}.SC2Data/LocalizedData/Editor/EditorCategoryStrings.txt
# EDSTR_CATEGORY_Race
# EDSTR_PLAYERPROPS_RACE
# The ??? means that I don't know what language it is.
# If multiple languages use the same set they should be comma separated
LOCALIZED_RACES = {
# enUS
'Terran': 'Terran',
'Protoss': 'Protoss',
'Zerg': 'Zerg',
# ruRU
'Терран': 'Terran',
'Протосс': 'Protoss',
'Зерг': 'Zerg',
# koKR
'테란': 'Terran',
'프로토스': 'Protoss',
'저그': 'Zerg',
# ??eu
'Terranie': 'Terran',
'Protosi': 'Protoss',
'Zergi': 'Zerg',
# zhCH
'人类': 'Terran',
'星灵': 'Protoss',
'异虫': 'Zerg',
# zhTW
'人類': 'Terran',
'神族': 'Protoss',
'蟲族': 'Zerg',
# ???
'Terrano': 'Terran',
# deDE
'Terraner': 'Terran',
# esES - Spanish
# esMX - Latin American
# frFR - French - France
# plPL - Polish Polish
# ptBR - Brazilian Portuguese
}
MESSAGE_CODES = {
'0': 'All',
'2': 'Allies',
'128': 'Header',
'125': 'Ping',
}
GAME_SPEED_FACTOR = {
'Slower': 0.6,
'Slow': 0.8,
'Normal': 1.0,
'Fast': 1.2,
'Faster': 1.4
}
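# Illustrative use (assumption): timings recorded in game seconds can be rescaled
# to real-time seconds by dividing by the speed factor, e.g.
#
#   real_seconds = game_seconds / GAME_SPEED_FACTOR['Faster']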
GATEWAY_CODES = {
'US': 'Americas',
'KR': 'Asia',
'EU': 'Europe',
'SG': 'South East Asia',
'XX': 'Public Test',
}
GATEWAY_LOOKUP = {
0: '',
1: 'us',
2: 'eu',
3: 'kr',
5: 'cn',
6: 'sea',
98: 'xx',
}
COLOR_CODES = {
'B4141E': 'Red',
'0042FF': 'Blue',
'1CA7EA': 'Teal',
'EBE129': 'Yellow',
'540081': 'Purple',
'FE8A0E': 'Orange',
'168000': 'Green',
'CCA6FC': 'Light Pink',
'1F01C9': 'Violet',
'525494': 'Light Grey',
'106246': 'Dark Green',
'4E2A04': 'Brown',
'96FF91': 'Light Green',
'232323': 'Dark Grey',
'E55BB0': 'Pink',
'FFFFFF': 'White',
'000000': 'Black',
}
COLOR_CODES_INV = dict(zip(COLOR_CODES.values(), COLOR_CODES.keys()))
REGIONS = {
# United States
'us': {
1: 'us',
2: 'la',
},
# Europe
'eu': {
1: 'eu',
2: 'ru',
},
# Korea - appear to both map to same place
'kr': {
1: 'kr',
2: 'tw',
},
# Taiwan - appear to both map to same place
'tw': {
1: 'kr',
2: 'tw',
},
# China - different url scheme (www.battlenet.com.cn)?
'cn': {
1: 'cn',
},
# South East Asia
'sea': {
1: 'sea',
},
# Singapore
'sg': {
1: 'sg',
},
# Public Test
'xx': {
1: 'xx',
},
}
import json
import pkgutil
attributes_json = pkgutil.get_data('sc2reader.data', 'attributes.json').decode('utf8')
attributes_dict = json.loads(attributes_json)
LOBBY_PROPERTIES = dict()
for key, value in attributes_dict.get('attributes', dict()).items():
LOBBY_PROPERTIES[int(key)] = value
| mit | 1,542,272,283,265,632,300 | 17.359756 | 129 | 0.50714 | false |
kundajelab/simdna | simdna/simdnautil/pwm.py | 1 | 5397 | from __future__ import absolute_import, division, print_function
import numpy as np
from simdna.simdnautil.util import DEFAULT_LETTER_TO_INDEX
from simdna.simdnautil import util
import math
class PWM(object):
"""
Object representing a position weight matrix;
allows sampling from the PWM either randomly or taking the best hit.
:param name: name of the PWM
:param letterToIndex: dictionary mapping from letter to index. Defaults
to ACGT ordering.
:param probMatrix: rows of the PWM (in probability space). Can be added
later too by calling addRows.
:param pseudocountProb: smoothing factor to add to probMatrix. Specify
this in the constructor if you are also specifying probMatrix in
the constructor.
"""
def __init__(self, name, letterToIndex=DEFAULT_LETTER_TO_INDEX,
probMatrix=None, pseudocountProb=None):
self.name = name
self.letterToIndex = letterToIndex
self.indexToLetter = dict(
(self.letterToIndex[x], x) for x in self.letterToIndex)
self._rows = []
self._finalised = False
if (probMatrix is not None):
self.addRows(matrix=probMatrix)
if (pseudocountProb is not None):
assert probMatrix is not None,(
"please specify probMatrix in the constructor if you are"
+"going to specify pseudocountProb in the constructor")
self.finalise(pseudocountProb=pseudocountProb)
def add_row(self, weights):
self.addRow(weights)
"""
Add row to the end of the PWM. Must be specified in probability
space.
:param weights: a row of the PWM (in probability space)
"""
def addRow(self, weights):
if (len(self._rows) > 0):
assert len(weights) == len(self._rows[0])
self._rows.append(weights)
"""
See addRows
"""
def add_rows(self, matrix):
return self.addRows(matrix)
"""
Add rows of 'matrix' to the end of the PWM. Must be specified in probability
space.
:param matrix: rows of the PWM (in probability space)
:return: self
"""
def addRows(self, matrix):
for row in matrix:
self.addRow(weights=row)
return self
def finalize(self, pseudocountProb=0.001):
return self.finalise(pseudocountProb=pseudocountProb)
def finalise(self, pseudocountProb=0.001):
"""
Function run after loading the weight matrix to smooth
the PWM after loading is complete
:param pseudocountProb: smoothing factor
:return:
"""
assert pseudocountProb >= 0 and pseudocountProb < 1
# will smoothen the rows with a pseudocount...
self._rows = np.array(self._rows)
self._rows = self._rows * \
(1 - pseudocountProb) + float(pseudocountProb) / len(self._rows[0])
for row in self._rows:
assert(abs(sum(row) - 1.0) < 0.0001)
self._logRows = np.log(self._rows)
self._finalised = True
self.bestPwmHit = self.computeBestHitGivenMatrix(self._rows)
self.pwmSize = len(self._rows)
return self
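# Minimal usage sketch (hypothetical 3-position probability matrix, default ACGT
# letter ordering assumed):
#
#   pwm = PWM("example_motif",
#             probMatrix=[[0.7, 0.1, 0.1, 0.1],
#                         [0.1, 0.7, 0.1, 0.1],
#                         [0.1, 0.1, 0.1, 0.7]],
#             pseudocountProb=0.001)
#   pwm.getBestHit()      # highest-probability instance, here "ACT"
#   pwm.sampleFromPwm()   # random string sampled column by column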
def get_best_hit(self):
return self.bestPwmHit
def getBestHit(self):
return self.bestPwmHit
def compute_best_hit_given_matrix(self, matrix):
"""
Compute the highest probability instance of the PWM
:param matrix: the matrix to use to copmute the PWM
:return: the string best hit
"""
return "".join(self.indexToLetter[x] for x in (np.argmax(matrix, axis=1)))
def computeBestHitGivenMatrix(self, matrix):
"""
Compute the highest probability instance of the PWM
:param matrix: the matrix to use to copmute the PWM
:return: the string best hit
"""
return "".join(self.indexToLetter[x] for x in (np.argmax(matrix, axis=1)))
def get_rows(self):
return self.getRows()
def getRows(self):
if (not self._finalised):
raise RuntimeError("Please call finalise on " + str(self.name))
return self._rows
def sample_from_pwm(self, bg=None):
return self.sampleFromPwm(bg=bg)
def sampleFromPwm(self, bg=None):
"""
Randomly sample according to the PWM; if a background is included
then compute the logodds relative to that background and return.
:param bg: background frequency to compute relative to
:return: sample or (sample and logodds) if bg is not None
"""
if (not self._finalised):
raise RuntimeError("Please call finalise on " + str(self.name))
sampledLetters = []
logOdds = 0
for row in self._rows:
sampledIndex = util.sampleFromProbsArr(row)
letter = self.indexToLetter[sampledIndex]
if (bg is not None):
logOdds += np.log(row[sampledIndex]) - np.log(bg[letter])
sampledLetters.append(letter)
sampledHit = "".join(sampledLetters)
if (bg is not None):
return (sampledHit, logOdds)
else:
return sampledHit
def sample_from_pwm_and_score(self, bg):
return self.sampleFromPwm(bg=bg)
def sampleFromPwmAndScore(self, bg):
return self.sampleFromPwm(bg=bg)
def __str__(self):
return self.name + "\n" + str(self._rows)
| mit | -9,102,593,418,064,766,000 | 33.158228 | 82 | 0.621642 | false |
MichalBusta/FASText | tools/icdarUtils.py | 1 | 41513 | '''
Created on Jan 7, 2015
@author: busta
'''
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import utils
from ft import FASTex
import pylab
import pandas
def draw_missed_letters(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 12, inter = True, scalingFactor=0.5):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
lastDir = ''
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
lastDir = dir_name
if 'letterKeypointHistogram' in vars_dict.keys():
letterKeypointHistogram = vars_dict['letterKeypointHistogram']
letterKeypointHistogram = dict(letterKeypointHistogram.tolist())
print(lastDir)
missing_letters = vars_dict['missing_letters']
missing_letters = dict(missing_letters.tolist())
segmDir = '{0}/segmentations'.format(inputDir)
keys = []
ticks = []
values = []
values.append([])
values.append([])
values.append([])
values.append([])
ticks.append([])
ticks.append([])
ticks.append([])
ticks.append([])
listlen = 0
for letter in letterKeypointHistogram.keys():
keys.append(letter)
values[0].append(0)
ticks[0].append(listlen)
values[1].append(0)
ticks[1].append(listlen + 0.2)
values[2].append(0)
ticks[2].append(listlen + 0.4)
values[3].append(0)
ticks[3].append(listlen + 0.6)
for num in letterKeypointHistogram[letter].keys():
values[num][listlen] = letterKeypointHistogram[letter][num]
listlen += 1
indices = sorted(range(len(values[0])),key=lambda x:values[0][x])
indices.reverse()
border = 25
for letter in np.asarray(keys)[np.asarray(indices)]:
if not missing_letters.has_key(letter):
continue
arr = missing_letters[letter]
for i in range(len(arr)):
miss = arr[i]
gt0 = miss[1]
gt = [gt0[0] - border, gt0[1] - border, gt0[2] + border, gt0[3] + border ]
gt[0] = max(0, gt[0])
gt[1] = max(0, gt[1])
if color == 1:
img = cv2.imread(miss[0])
else:
img = cv2.imread(miss[0], 0)
gt[2] = min(img.shape[1], gt[2])
gt[3] = min(img.shape[0], gt[3])
baseName = os.path.basename(miss[0])
baseName = baseName[:-4]
segmImg = '{0}/{1}_GT.bmp'.format(segmDir, baseName)
if not os.path.exists(segmImg):
segmImg = '{0}/gt_{1}.png'.format(segmDir, baseName)
segmImg = cv2.imread(segmImg)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
scales = ft.getImageScales()
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = centers == (255, 255, 255)
keypointsInsideMask = np.invert(np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]))
keypointsInside = keypoints[keypointsInsideMask, :]
octaves = np.unique( keypointsInside[:, 2])
maxOctave = 0
if octaves.shape[0] > 0:
maxOctave = np.max(octaves)
mask = (keypoints[:, 0] > gt[0]) * (keypoints[:, 0] < gt[2]) * (keypoints[:, 1] > gt[1]) * (keypoints[:, 1] < gt[3])
images = []
octPoints = []
octScales = []
keypointsInRect = keypoints[mask, :]
for i in range(int(maxOctave) + 1):
scale = scales[i]
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
octavePoints = keypointsInRect[keypointsInRect[:, 2] == i, :].copy()
if octavePoints.shape[0] > 0:
dst = ft.getImageAtScale(i)
images.append(dst)
octavePoints[:, 0] *= scales[i]
octavePoints[:, 1] *= scales[i]
octavePoints[:, 5] *= scales[i]
octavePoints[:, 6] *= scales[i]
octavePoints[:, 7] *= scales[i]
octavePoints[:, 8] *= scales[i]
octPoints.append(octavePoints)
octScales.append(scale)
f, axes = plt.subplots(1, 1 + len(images), figsize=(16, 3))
if len(images) > 0:
ax = axes[0]
else:
ax = axes
if color == 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
zoom = img[gt[1]:gt[3], gt[0]:gt[2]]
ax.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
kpMask = keypoints[mask]
kpMask[:, 0] = kpMask[:, 0] - gt[0]
kpMask[:, 1] = kpMask[:, 1] - gt[1]
kpMask[:, 7] = kpMask[:, 7] - gt[0]
kpMask[:, 8] = kpMask[:, 8] - gt[1]
ax.plot(kpMask[:, 0], kpMask[:, 1], 'ro')
for k in range(kpMask.shape[0]):
ax.plot([kpMask[k,0], kpMask[k,7]], [kpMask[k,1], kpMask[k,8]], 'r-')
style = 'rx'
if kpMask.shape[1] > 9:
for k in range(3):
maski = kpMask[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "rs"
if k == 4:
style = "bo"
if k == 5:
style = "yo"
ax.plot([kpMask[maski,7]], [kpMask[maski,8]], style)
mask = (keypointsInside[:, 0] > gt[0]) * (keypointsInside[:, 0] < gt[2]) * (keypointsInside[:, 1] > gt[1]) * (keypointsInside[:, 1] < gt[3])
kpMask = keypointsInside[mask]
keypointsInside[:, 0] = keypointsInside[:, 0] - gt[0]
keypointsInside[:, 1] = keypointsInside[:, 1] - gt[1]
keypointsInside[:, 7] = keypointsInside[:, 7] - gt[0]
keypointsInside[:, 8] = keypointsInside[:, 8] - gt[1]
ax.plot(keypointsInside[:, 0], keypointsInside[:, 1], 'go')
for k in range(keypointsInside.shape[0]):
ax.plot([keypointsInside[k,0], keypointsInside[k,7]], [keypointsInside[k,1], keypointsInside[k,8]], 'g-')
ax.set_xlim(0, gt[2] - max(0, gt[0]))
ax.set_ylim((gt[3] - max(0, gt[1]), 0))
line = mlines.Line2D(np.array([gt0[0] - gt[0], gt0[2] - gt[0], gt0[2] - gt[0], gt0[0] - gt[0], gt0[0] - gt[0]]), np.array([gt0[1] - gt[1], gt0[1] - gt[1], gt0[3] - gt[1], gt0[3] - gt[1], gt0[1] - gt[1]]), lw=5., alpha=0.4, color='b')
ax.add_line(line)
f.suptitle('Missing letter: {0} ({1})'.format(gt0[4], miss[0]))
for ai in range(len(images)):
ax = axes[ai + 1]
scale = octScales[ai]
gts = (gt[0] * scale, gt[1] * scale, gt[2] * scale, gt[3] * scale)
ax.plot(octPoints[ai][:, 0] - gts[0], octPoints[ai][:, 1] - gts[1], 'ro')
zoom = images[ai][int(gt[1] * scale):int(gt[3] * scale), int(gt[0] * scale):int(gt[2] * scale)]
ax.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
ax.set_xlim(0, gts[2] - max(0, gts[0]))
ax.set_ylim((gts[3] - max(0, gts[1]), 0))
plt.show()
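# Illustrative call (assumes evaluation.npz files produced by a previous run live in
# sub-directories of input_dir, together with the source images and GT segmentations):
#
#   draw_missed_letters('/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train',
#                       color=0, edgeThreshold=12)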
def draw_missed_segm(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 12, inter = True, scalingFactor=0.5):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
missing_segm = vars_dict['missing_segm']
missing_segm = dict(missing_segm.tolist())
segmDir = '{0}/segmentations'.format(inputDir)
for image in missing_segm.keys():
arr = missing_segm[image]
if color == 1:
img = cv2.imread(image)
else:
img = cv2.imread(image, 0)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
baseName = os.path.basename(image)
baseName = baseName[:-4]
segmImg = '{0}/{1}_GT.bmp'.format(segmDir, baseName)
if not os.path.exists(segmImg):
segmImg = '{0}/gt_{1}.png'.format(segmDir, baseName)
segmImg = cv2.imread(segmImg)
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = centers == (255, 255, 255)
keypointsInsideMask = np.invert(np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]))
keypointsInside = keypoints[keypointsInsideMask, :]
f = plt.figure(num = 110)
ax = f.add_subplot(111)
ax.imshow(img, cmap=pylab.gray(), interpolation='nearest')
style = "rx"
for k in range(6):
maski = keypoints[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "ro"
if k == 4:
style = "bo"
if k == 5:
style = "yo"
ax.plot(keypoints[maski, 0], keypoints[maski, 1], style)
ax.plot(keypointsInside[:, 0], keypointsInside[:, 1], 'go')
ax.set_xlim(0, img.shape[1])
ax.set_ylim(img.shape[0], 0)
for i in range(len(arr)):
miss_gt = arr[i]
line = mlines.Line2D(np.array([miss_gt[0], miss_gt[2], miss_gt[2], miss_gt[0], miss_gt[0]]), np.array([miss_gt[1], miss_gt[1], miss_gt[3], miss_gt[3], miss_gt[1]]), lw=5., alpha=0.4, color='b')
ax.add_line(line)
ax.set_title('Missing segmentation: {0}'.format(image))
plt.show()
def plot_keypoints_histograms(vars_dict):
f, ax = plt.subplots(2, sharex=True)
hist = vars_dict['hist']
ax[0].plot(hist)
ax[0].set_title('FAST Keypoints Histogram')
plt.xlabel('Intensity')
plt.ylabel('Keypoints Count')
ax[0].set_xlim([0, 255])
hist = vars_dict['histFp']
ax[1].plot(hist)
ax[1].set_title('FAST Keypoints Histogram - False Positives')
ax[1].set_xlim([0, 255])
f, ax = plt.subplots(2, sharex=True)
histDist = vars_dict['histDist']
ax[0].plot(histDist)
ax[0].set_title('FAST Keypoints Scores')
plt.xlabel('Score')
plt.ylabel('Keypoints Count')
ax[0].set_xlim([0, 255])
histDistFp = vars_dict['histDistFp']
ax[1].plot(histDistFp)
ax[1].set_title('FAST Keypoints Scores')
ax[1].set_xlim([0, 255])
f, ax = plt.subplots(4, sharex=True)
histDist = vars_dict['histDistMax']
ax[0].plot(histDist)
ax[0].set_title('Keypoints on Letter')
plt.xlabel('Distance')
ax[0].set_ylabel('Keypoints Count')
ax[0].set_xlim([0, 255])
histDistFp = vars_dict['histDistMaxFp']
ax[1].plot(histDistFp)
ax[1].set_title('Keypoints Outside Letter')
ax[1].set_ylabel('Keypoints Count')
ax[1].set_xlim([0, 255])
histDistMaxWhite = vars_dict['histDistMaxWhite']
ax[2].plot(histDistMaxWhite)
ax[2].set_title('Black Ink Keypoints')
ax[2].set_ylabel('Keypoints Count')
ax[2].set_xlim([0, 255])
histDistMaxWhiteFp = vars_dict['histDistMaxWhiteFp']
ax[3].plot(histDistMaxWhiteFp)
ax[3].set_title('Black Ink Keypoints - Outside')
ax[3].set_ylabel('Keypoints Count')
ax[3].set_xlim([0, 255])
hist2dDist = vars_dict['hist2dDist']
hist2dDistFp = vars_dict['hist2dDistFp']
fig, ax = plt.subplots(1, 2, sharey=True, figsize=(17, 8))
ax[0].set_xlabel('Intensity')
ax[0].set_ylabel('Max Distance')
ax[0].set_xlim([0, 255])
ax[0].set_ylim([0, 255])
imgplot = ax[0].imshow(hist2dDist, interpolation='nearest', origin='low')
ax[0].set_title('Kepoints Inside')
imgplot.set_cmap('hot')
ax[1].set_title('Kepoints Ouside')
ax[1].set_xlabel('Intensity')
ax[1].set_ylabel('Max Distance')
imgplot = ax[1].imshow(hist2dDistFp, interpolation='nearest', origin='low')
imgplot.set_cmap('hot')
ax[1].set_xlim([0, 255])
ax[1].set_ylim([0, 255])
hist2dDist = vars_dict['hist2dDistScore']
hist2dDistFp = vars_dict['hist2dDistScoreFp']
fig, ax = plt.subplots()
ax.set_xlabel('Score')
ax.set_ylabel('DistMax')
imgplot = ax.imshow(hist2dDist, interpolation='nearest', origin='low')
ax.set_title('Kepoints Inside')
imgplot.set_cmap('hot')
fig, ax = plt.subplots()
ax.set_title('Kepoints Ouside')
ax.set_xlabel('Score')
ax.set_ylabel('DistMax')
imgplot = ax.imshow(hist2dDistFp, interpolation='nearest', origin='low')
imgplot.set_cmap('hot')
def collect_histograms(img, segmImg, keypoints, values, diffValMax, keypointsTotalInside, diffMaxOctavesMap, diffScoreOctavesMap, hist, histFp, histDist, histDistMax, histDistMaxWhite, hist2dDist, hist2dDistScore, histDistFp, histDistMaxFp, histDistMaxWhiteFp, hist2dDistFp, hist2dDistScoreFp):
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = (centers == (255, 255, 255))
keypointsInsideMask = np.invert( np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]) )
keypointsTotalInside += np.count_nonzero(keypointsInsideMask)
centers2 = segmImg[keypoints[:, 8].astype(int), keypoints[:, 7].astype(int)]
keypointsInsideMask2 = (centers2 == (255, 255, 255))
keypointsInsideMask2 = np.invert( np.bitwise_and(np.bitwise_and(keypointsInsideMask2[:, 0], keypointsInsideMask2[:, 1]), keypointsInsideMask2[:, 2]) )
keypointsInsideMask = np.bitwise_or(keypointsInsideMask, keypointsInsideMask2)
keypointsInside = keypoints[keypointsInsideMask, :]
maskBlackInk = img[keypoints[:, 8].astype(int), keypoints[:, 7].astype(int)] <= img[keypoints[:, 6].astype(int), keypoints[:, 5].astype(int)]
maskWhiteInk = np.invert(maskBlackInk)
octaves = np.unique( keypointsInside[:, 2])
if len(octaves) > 0:
maxOctave = np.max(octaves)
difMaxInside = diffValMax[keypointsInsideMask]
for i in range(int(maxOctave) + 1):
difMaxInsideOctave = difMaxInside[keypointsInside[:, 2] == i]
keypointsOctaveScore = keypointsInside[keypointsInside[:, 2] == i, 3]
if difMaxInsideOctave.shape[0] > 0:
if diffMaxOctavesMap.has_key(i):
diffMaxOctavesMap[i] = np.hstack( (diffMaxOctavesMap[i], np.copy(difMaxInsideOctave)))
diffScoreOctavesMap[i] = np.hstack( (diffScoreOctavesMap[i], np.copy(keypointsOctaveScore) ) )
else:
diffMaxOctavesMap[i] = np.copy(difMaxInsideOctave)
diffScoreOctavesMap[i] = np.copy(keypointsOctaveScore)
bins = np.arange(255)
if hist is None:
hist = np.histogram(values[keypointsInsideMask], bins=bins)[0]
histDist = np.histogram(keypointsInside[:, 3], bins=bins)[0]
histDistMax = np.histogram(diffValMax[keypointsInsideMask], bins=bins)[0]
histDistMaxWhite = np.histogram(diffValMax[np.bitwise_and(keypointsInsideMask, maskWhiteInk)], bins=bins)[0]
hist2dDist = np.histogram2d(values[keypointsInsideMask], diffValMax[keypointsInsideMask], [bins, bins])[0]
hist2dDistScore = np.histogram2d(keypointsInside[:, 3].astype(np.uint8), diffValMax[keypointsInsideMask], [bins, bins])[0]
else:
hist = np.add(hist, np.histogram(values[keypointsInsideMask], bins)[0])
histDist = np.add(histDist, np.histogram(keypointsInside[:, 3], bins=bins)[0])
histDistMax = np.add(histDistMax, np.histogram(diffValMax[keypointsInsideMask], bins=bins)[0])
histDistMaxWhite = np.add(histDistMaxWhite, np.histogram(diffValMax[np.bitwise_and(keypointsInsideMask, maskWhiteInk)], bins=bins)[0])
hist2dDist = np.add(hist2dDist, np.histogram2d(values[keypointsInsideMask], diffValMax[keypointsInsideMask], [bins, bins])[0])
hist2dDistScore = np.add(hist2dDistScore, np.histogram2d(keypointsInside[:, 3].astype(np.uint8), diffValMax[keypointsInsideMask], [bins, bins])[0])
outsideMask = np.invert(keypointsInsideMask)
keypointsOutside = keypoints[outsideMask, :]
valuesFp = img[keypointsOutside[:, 1].astype(int), keypointsOutside[:, 0].astype(int)]
if histFp is None:
histFp = np.histogram(valuesFp, bins=bins)[0]
histDistFp = np.histogram(keypointsOutside[:, 3], bins=bins)[0]
histDistMaxFp = np.histogram(diffValMax[outsideMask], bins=bins)[0]
histDistMaxWhiteFp = np.histogram(diffValMax[np.bitwise_and(outsideMask, maskWhiteInk)], bins=bins)[0]
hist2dDistFp = np.histogram2d(values[outsideMask], diffValMax[outsideMask], [bins, bins])[0]
hist2dDistScoreFp = np.histogram2d(keypointsOutside[:, 3], diffValMax[outsideMask], [bins, bins])[0]
else:
histFp = np.add(histFp, np.histogram(valuesFp, bins)[0])
histDistFp = np.add(histDistFp, np.histogram(keypointsOutside[:, 3], bins=bins)[0])
histDistMaxFp = np.add(histDistMaxFp, np.histogram(diffValMax[outsideMask], bins=bins)[0])
histDistMaxWhiteFp = np.add(histDistMaxWhiteFp, np.histogram(diffValMax[np.bitwise_and(outsideMask, maskWhiteInk)], bins=bins)[0])
hist2dDistFp = np.add(hist2dDistFp, np.histogram2d(values[outsideMask], diffValMax[outsideMask], [bins, bins])[0])
hist2dDistScoreFp = np.add(hist2dDistScoreFp, np.histogram2d(keypointsOutside[:, 3], diffValMax[outsideMask], [bins, bins])[0])
return (hist, histFp, histDist, histDistMax, histDistMaxWhite, hist2dDist, hist2dDistScore, histDistFp, histDistMaxFp, histDistMaxWhiteFp, hist2dDistFp, hist2dDistScoreFp, keypointsInside)
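# collect_histograms() accumulates per-image histograms into running totals by starting
# from None and np.add-ing each new histogram.  A minimal, self-contained sketch of that
# idiom (the random input below is illustrative only and is not part of the pipeline):
def _demo_histogram_accumulation(batches=3, samples=1000):
    bins = np.arange(255)
    total = None
    for _ in range(batches):
        values = np.random.randint(0, 255, samples)  # stand-in for keypoint intensities
        counts = np.histogram(values, bins=bins)[0]
        total = counts if total is None else np.add(total, counts)
    return total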
def draw_missed_letters_figure(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 13, inter = True, scalingFactor=0.5, segmList=[]):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
lastDir = ''
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
lastDir = dir_name
if 'letterKeypointHistogram' in vars_dict.keys():
letterKeypointHistogram = vars_dict['letterKeypointHistogram']
letterKeypointHistogram = dict(letterKeypointHistogram.tolist())
print(lastDir)
missing_letters = vars_dict['missing_letters']
missing_letters = dict(missing_letters.tolist())
keys = []
ticks = []
values = []
values.append([])
values.append([])
values.append([])
values.append([])
ticks.append([])
ticks.append([])
ticks.append([])
ticks.append([])
listlen = 0
for letter in letterKeypointHistogram.keys():
keys.append(letter)
values[0].append(0)
ticks[0].append(listlen)
values[1].append(0)
ticks[1].append(listlen + 0.2)
values[2].append(0)
ticks[2].append(listlen + 0.4)
values[3].append(0)
ticks[3].append(listlen + 0.6)
for num in letterKeypointHistogram[letter].keys():
values[num][listlen] = letterKeypointHistogram[letter][num]
listlen += 1
indices = sorted(range(len(values[0])),key=lambda x:values[0][x])
indices.reverse()
missLetter = []
imagesMiss = {}
for letter in np.asarray(keys)[np.asarray(indices)]:
        if letter not in missing_letters:
continue
arr = missing_letters[letter]
for i in range(len(arr)):
miss = arr[i]
if len(segmList) > 0:
base = os.path.basename(miss[0])
                if base not in segmList:
continue
missLetter.append(miss)
            if miss[0] in imagesMiss:
imagesMiss[miss[0]].append( miss[1] )
else:
imagesMiss[miss[0]] = []
imagesMiss[miss[0]].append( miss[1] )
for image in imagesMiss.keys():
f = plt.figure(num = 250)
ax = f.add_subplot(111)
imgc2 = cv2.imread(image)
img = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
imgc2 = cv2.cvtColor(imgc2, cv2.COLOR_BGR2RGB)
ax.imshow(imgc2)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
octaves = np.unique( keypoints[:, 2])
maxOctave = np.max(octaves)
scales = ft.getImageScales()
for i in range(int(maxOctave) + 1):
octavePoints = keypoints[keypoints[:, 2] == i, :]
c = 'red'
if len(octavePoints) > 0 and octavePoints.shape[1] > 9:
for k in range(6):
maski = octavePoints[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "ro"
if k == 4:
style = "bo"
c = 'blue'
if k == 5:
style = "yo"
continue
s = 10 / scales[i]
ax.scatter(octavePoints[maski, 0], octavePoints[maski, 1],c=c, s=s )
for i in range(len(imagesMiss[image])):
gt0 = imagesMiss[image][i]
line = mlines.Line2D(np.array([gt0[0], gt0[2], gt0[2], gt0[0], gt0[0]]), np.array([gt0[1], gt0[1], gt0[3], gt0[3], gt0[1]]), lw=5., alpha=0.6, color='r')
ax.add_line(line)
plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0)
plt.margins(0,0)
ax.set_xlim([0, imgc2.shape[1]])
ax.set_ylim([imgc2.shape[0], 0])
ax.axes.get_xaxis().set_ticks([])
ax.axes.get_yaxis().set_ticks([])
plt.show()
def draw_missed_letters_tile(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Train', color = 0, edgeThreshold = 13, inter = True, scalingFactor=1.6, segmList=[]):
ft = FASTex(process_color = color, edgeThreshold = edgeThreshold)
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
lastDir = ''
for dir_name in subdirs:
file_name = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(file_name):
continue
vars_dict = np.load(file_name)
inputDir = vars_dict['inputDir']
lastDir = dir_name
if 'letterKeypointHistogram' in vars_dict.keys():
letterKeypointHistogram = vars_dict['letterKeypointHistogram']
letterKeypointHistogram = dict(letterKeypointHistogram.tolist())
print(lastDir)
missing_letters = vars_dict['missing_letters']
missing_letters = dict(missing_letters.tolist())
segmDir = '{0}/segmentations'.format(inputDir)
segmDir = '/datagrid/personal/TextSpotter/evaluation-sets/icdar2013-Test/segmentations'
keys = []
ticks = []
values = []
values.append([])
values.append([])
values.append([])
values.append([])
ticks.append([])
ticks.append([])
ticks.append([])
ticks.append([])
listlen = 0
for letter in letterKeypointHistogram.keys():
keys.append(letter)
values[0].append(0)
ticks[0].append(listlen)
values[1].append(0)
ticks[1].append(listlen + 0.2)
values[2].append(0)
ticks[2].append(listlen + 0.4)
values[3].append(0)
ticks[3].append(listlen + 0.6)
for num in letterKeypointHistogram[letter].keys():
values[num][listlen] = letterKeypointHistogram[letter][num]
listlen += 1
indices = sorted(range(len(values[0])),key=lambda x:values[0][x])
indices.reverse()
border = 15
missLetter = []
imagesMiss = {}
for letter in np.asarray(keys)[np.asarray(indices)]:
        if letter not in missing_letters:
continue
arr = missing_letters[letter]
for i in range(len(arr)):
miss = arr[i]
if len(segmList) > 0:
base = os.path.basename(miss[0])
                if base not in segmList:
continue
missLetter.append(miss)
            if miss[0] in imagesMiss:
imagesMiss[miss[0]].append( miss[1] )
else:
imagesMiss[miss[0]] = []
imagesMiss[miss[0]].append( miss[1] )
rowSize = len(imagesMiss.keys())
f, axes = plt.subplots(2, len(imagesMiss.keys()))
plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None)
figNo = 0
for image in imagesMiss.keys():
if len(imagesMiss.keys()) > 1:
ax0 = axes[0][figNo]
ax = axes[1][figNo]
else:
ax0 = axes[figNo]
ax = axes[figNo]
figNo += 1
if color == 1:
img = cv2.imread(image)
else:
img = cv2.imread(image, 0)
baseName = os.path.basename(image)
baseName = baseName[:-4]
segmImg = '{0}/{1}_GT.bmp'.format(segmDir, baseName)
if not os.path.exists(segmImg):
segmImg = '{0}/gt_{1}.png'.format(segmDir, baseName)
segmImg = cv2.imread(segmImg)
segmentations = ft.getCharSegmentations(img)
keypoints = ft.getLastDetectionKeypoints()
if color == 1:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
for i in range(len(imagesMiss[image])):
if i == 0:
orBox = imagesMiss[image][0]
else:
orBox = utils.union(orBox, imagesMiss[image][i])
gt0 = orBox
gt = [gt0[0] - border, gt0[1] - border, gt0[2] + border, gt0[3] + border ]
gt[0] = max(0, gt[0])
gt[1] = max(0, gt[1])
gt[2] = min(img.shape[1], gt[2])
gt[3] = min(img.shape[0], gt[3])
zoom = img[gt[1]:gt[3], gt[0]:gt[2]]
ax.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
ax0.imshow(zoom, cmap=pylab.gray(), interpolation='nearest')
centers = segmImg[keypoints[:, 1].astype(int), keypoints[:, 0].astype(int)]
keypointsInsideMask = centers == (255, 255, 255)
keypointsInsideMask = np.invert(np.bitwise_and(np.bitwise_and(keypointsInsideMask[:, 0], keypointsInsideMask[:, 1]), keypointsInsideMask[:, 2]))
keypointsInside = keypoints[keypointsInsideMask, :]
mask = (keypoints[:, 0] > gt[0]) * (keypoints[:, 0] < gt[2]) * (keypoints[:, 1] > gt[1]) * (keypoints[:, 1] < gt[3])
kpMask = keypoints[mask]
kpMask[:, 0] = kpMask[:, 0] - gt[0]
kpMask[:, 1] = kpMask[:, 1] - gt[1]
kpMask[:, 7] = kpMask[:, 7] - gt[0]
kpMask[:, 8] = kpMask[:, 8] - gt[1]
ax.plot(kpMask[:, 0], kpMask[:, 1], 'ro')
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax0.xaxis.set_ticklabels([])
ax0.yaxis.set_ticklabels([])
for k in range(kpMask.shape[0]):
ax.plot([kpMask[k,0], kpMask[k,7]], [kpMask[k,1], kpMask[k,8]], 'r-')
style = 'rx'
if kpMask.shape[1] > 9:
for k in range(3):
maski = kpMask[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "rs"
if k == 4:
style = "bo"
if k == 5:
style = "yo"
ax.plot([kpMask[maski,7]], [kpMask[maski,8]], style)
for i in range(len(imagesMiss[image])):
gt0 = imagesMiss[image][i]
mask = (keypointsInside[:, 0] > gt[0]) * (keypointsInside[:, 0] < gt[2]) * (keypointsInside[:, 1] > gt[1]) * (keypointsInside[:, 1] < gt[3])
kpMask = keypointsInside[mask]
keypointsInside[:, 0] = keypointsInside[:, 0] - gt[0]
keypointsInside[:, 1] = keypointsInside[:, 1] - gt[1]
keypointsInside[:, 7] = keypointsInside[:, 7] - gt[0]
keypointsInside[:, 8] = keypointsInside[:, 8] - gt[1]
ax.plot(keypointsInside[:, 0], keypointsInside[:, 1], 'go')
for k in range(keypointsInside.shape[0]):
ax.plot([keypointsInside[k,0], keypointsInside[k,7]], [keypointsInside[k,1], keypointsInside[k,8]], 'g-')
ax.set_xlim(0, gt[2] - max(0, gt[0]))
ax.set_ylim((gt[3] - max(0, gt[1]), 0))
line = mlines.Line2D(np.array([gt0[0] - gt[0], gt0[2] - gt[0], gt0[2] - gt[0], gt0[0] - gt[0], gt0[0] - gt[0]]), np.array([gt0[1] - gt[1], gt0[1] - gt[1], gt0[3] - gt[1], gt0[3] - gt[1], gt0[1] - gt[1]]), lw=5., alpha=0.6, color='r')
ax0.add_line(line)
plt.show()
def computeWordOvelap(imgc, word_gt, words, wordsOk, wordsFp):
best_match = 0
best_match2 = 0
for det_word in words:
try:
cv2.rectangle(imgc, (det_word[0], det_word[1]), (det_word[2], det_word[3]), (0, 0, 255))
for gt_box in word_gt:
rect_int = utils.intersect( det_word, gt_box )
int_area = utils.area(rect_int)
union_area = utils.area(utils.union(det_word, gt_box))
ratio = int_area / float(union_area)
ratio2 = int_area / utils.area(gt_box)
if ratio > best_match:
best_match = ratio
w = det_word
best_match2 = ratio2
if best_match2 > 0.3:
wordsOk.append(det_word)
elif best_match == 0:
wordsFp.append(det_word)
except:
pass
return (best_match, best_match2)
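# computeWordOvelap() scores a detection as intersection area divided by the area of
# utils.union(det_word, gt_box); utils.union appears to return the enclosing bounding
# box (it is also used above to grow orBox), so the ratio is intersection over
# enclosing-box area rather than a strict IoU.  A standalone sketch of that ratio for
# (x1, y1, x2, y2) boxes; the helper is illustrative and does not replace utils.intersect/area:
def _demo_box_overlap(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    ex1, ey1 = min(a[0], b[0]), min(a[1], b[1])
    ex2, ey2 = max(a[2], b[2]), max(a[3], b[3])
    enclosing = (ex2 - ex1) * (ey2 - ey1)
    return inter / float(enclosing) if enclosing else 0.0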
evalPunctuation = False
def computeSegmOverlap(gt_rects, segmentations, MIN_SEGM_OVRLAP = 0.6):
segm2chars = 0
for k in range(len(gt_rects)):
gt_rect = gt_rects[k]
best_match = 0
best_match_line = 0
if (gt_rect[4] == ',' or gt_rect[4] == '.' or gt_rect[4] == '\'' or gt_rect[4] == ':' or gt_rect[4] == '-') and not evalPunctuation:
continue
best_match2 = 0
for detId in range(segmentations.shape[0]):
rectn = segmentations[detId, :]
rect_int = utils.intersect( rectn, gt_rect )
int_area = utils.area(rect_int)
union_area = utils.area(utils.union(rectn, gt_rect))
ratio = int_area / float(union_area)
if ratio > best_match:
best_match = ratio
if ratio > best_match_line and rectn[7] == 1.0 :
best_match_line = ratio
gt_rect[5] = best_match
if best_match < MIN_SEGM_OVRLAP:
if k < len(gt_rects) - 1:
gt_rect2 = gt_rects[k + 1]
chars2Rect = utils.union(gt_rect2, gt_rect)
rect_int = utils.intersect( rectn, chars2Rect )
int_area = utils.area(rect_int)
union_area = utils.area(utils.union(rectn, chars2Rect))
ratio = int_area / float(union_area)
if ratio > best_match2:
if ratio > MIN_SEGM_OVRLAP:
segm2chars += 1
best_match2 = ratio
gt_rect[5] = ratio
gt_rect2[5] = ratio
def read_segm_data(input_dir, prefix = ""):
d=input_dir
subdirs = [os.path.join(d,o) for o in os.listdir(d) if os.path.isdir(os.path.join(d,o))]
subdirs = np.sort(subdirs)
ms = []
dirs = []
for dir_name in subdirs:
inputFile = '{0}/evaluation.npz'.format(dir_name)
if not os.path.exists(inputFile):
continue
vars_dict = np.load(inputFile)
missing_segm = vars_dict['missing_segm']
missing_segm = dict(missing_segm.tolist())
ms.append(missing_segm)
dirs.append(prefix + os.path.basename(dir_name))
return (ms, dirs)
def compare_missed_segm(input_dir='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentation', input_dir2='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentationg', showPictures = False):
ft = FASTex()
(ms, dirs) = read_segm_data(input_dir)
(ms2, dirs2) = read_segm_data(input_dir2, 'g')
ms.extend(ms2)
dirs.extend(dirs2)
sumHash = {}
for j in np.arange(0, len(ms)):
missing_segm = ms[j]
for image in missing_segm.keys():
arr = missing_segm[image]
            if image not in sumHash:
sumHash[image] = arr
continue
for i in range(len(arr)):
miss_gt = arr[i]
check = sumHash[image]
hasGt = False
for k in range(len(check)):
miss_gt2 = check[k]
if miss_gt == miss_gt2:
hasGt = True
if not hasGt:
sumHash[image].append(miss_gt)
missing_segm = ms[0]
data = []
dataf = []
gt_id = 0
columns = ['Img', 'GT Id']
for image in sumHash.keys():
arr = sumHash[image]
f = None
for i in range(len(arr)):
orValue = False
miss_gt = arr[i]
row = []
row.append(os.path.basename(image))
row.append(gt_id)
gt_id += 1
rowf = []
for j in np.arange(0, len(ms)):
if gt_id == 1:
columns.append(dirs[j])
msj = ms[j]
hasSegmj = True
val = 1
                if image in msj:
arrj = msj[image]
for k in range(len(arrj)):
miss_gtj = arrj[k]
if miss_gtj == miss_gt:
hasSegmj = False
val = 0
break
row.append(hasSegmj)
rowf.append(val)
orValue = orValue or hasSegmj
if orValue:
rowf.append(1)
else:
rowf.append(0)
if showPictures:
img = cv2.imread(image)
imgg = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
                if f is None:
f, axes = plt.subplots(1, 2, figsize=(16, 3))
f.suptitle('Missing segmentation: {0}'.format(image))
ax = axes[0]
ax.imshow(img, cmap=pylab.gray(), interpolation='nearest')
ax = axes[1]
ax.imshow(imgg, cmap=pylab.gray(), interpolation='nearest')
orBox = miss_gt
segmentations = ft.getCharSegmentations(imgg)
keypoints = ft.getLastDetectionKeypoints()
style = 'rx'
for k in range(5):
maski = keypoints[:, 9] == k + 1
if k == 1:
style = "rv"
if k == 2:
style = "ro"
if k == 4:
style = "bo"
ax.plot(keypoints[maski, 0], keypoints[maski, 1], style)
for k in range(keypoints.shape[0]):
ax.plot([keypoints[k,0], keypoints[k,7]], [keypoints[k,1], keypoints[k,8]], 'r-')
ax = axes[0]
else:
orBox = utils.union(orBox, miss_gt)
line = mlines.Line2D(np.array([miss_gt[0], miss_gt[2], miss_gt[2], miss_gt[0], miss_gt[0]]), np.array([miss_gt[1], miss_gt[1], miss_gt[3], miss_gt[3], miss_gt[1]]), lw=5., alpha=0.6, color='r')
ax.add_line(line)
row.append(orValue)
data.append(row)
dataf.append(rowf)
        if f is not None:
ax = axes[0]
ax.set_xlim(orBox[0] - 20, orBox[2] + 20)
ax.set_ylim(orBox[3] + 20, orBox[1] - 20)
ax = axes[1]
ax.set_xlim(orBox[0] - 20, orBox[2] + 20)
ax.set_ylim(orBox[3] + 20, orBox[1] - 20)
plt.show()
columns.append("OR")
data = np.array(data)
dataf = np.array(dataf)
df = pandas.DataFrame(data = data, columns=columns)
#print(df)
sumCols = dataf.sum(0)
sumCols = dataf.shape[0] - sumCols
print("Missing Segmentations:")
print(sumCols)
indices = np.argsort(sumCols)
bestFactor = indices[1]
missing_segm = ms[bestFactor]
print( "Best factor: {0}".format(dirs[bestFactor]) )
maskBest = dataf[:, bestFactor] == 0
datafSec = dataf[maskBest, :]
sumCols = datafSec.sum(0)
sumCols = datafSec.shape[0] - sumCols
print("Missing Segmentations 2 best:")
print(sumCols)
indices = np.argsort(sumCols)
bestFactor2 = indices[1]
print( "Best factor 2: {0}, missing segmentations: {1} -> {2}".format(dirs[bestFactor2], datafSec.shape[0], sumCols[indices[1]]) )
maskBest = datafSec[:, bestFactor2] == 0
dataf3 = datafSec[maskBest, :]
sumCols = dataf3.sum(0)
sumCols = dataf3.shape[0] - sumCols
indices = np.argsort(sumCols)
bestFactor2 = indices[1]
print( "Best factor 3: {0}, missing segmentations: {1} -> {2}".format(dirs[bestFactor2], dataf3.shape[0], sumCols[indices[1]]) )
if __name__ == '__main__':
draw_missed_letters('/tmp/evalTest')
segmList = []
segmList.append( 'img_49.jpg' )
segmList.append( 'img_168.jpg' )
segmList.append( 'img_206.jpg' )
segmList.append( 'img_86.jpg' )
segmList.append( 'img_205.jpg' )
segmList.append( 'img_232.jpg' )
segmList.append( 'img_34.jpg' )
segmList.append( 'img_230.jpg' )
draw_missed_letters_figure(input_dir='/datagrid/personal/TextSpotter/FastTextEval/ICDAR-Test', color = 0, edgeThreshold = 13, inter = True, segmList=segmList)
'''
compare_missed_segm(input_dir='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentation', input_dir2='/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentationg', showPictures = True)
plotSegmRoc('/datagrid/personal/TextSpotter/FastTextEval/experiments/segmentation')
'''
| gpl-2.0 | 3,569,044,324,166,943,000 | 38.57388 | 294 | 0.526004 | false |
elzaggo/pydoop | examples/pseudo_terasort/pteragen.py | 1 | 3734 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Generate a GraySort input data set.
The user specifies the number of rows and the output directory and this
class runs a map/reduce program to generate the data.
The format of the data is:
* (10 bytes key) (constant 2 bytes) (32 bytes rowid)
(constant 4 bytes) (48 bytes filler) (constant 4 bytes)
* The rowid is the right justified row id as a hex number.
"""
import struct
import random
import pydoop.mapreduce.api as api
import pydoop.mapreduce.pipes as pp
from ioformats import Writer
TERAGEN = "TERAGEN"
CHECKSUM = "CHECKSUM"
SEED = 423849
CACHE_SIZE = 16 * 1024
getrandbits = random.getrandbits
class GenSort(object):
"""\
Some sort of gensort look-alike. No idea on its statistical properties
"""
BREAK_BYTES = struct.pack("2B", 0x00, 0x11)
DATA_HEAD = struct.pack("4B", 0x88, 0x99, 0xAA, 0xBB)
DATA_TAIL = struct.pack("4B", 0xCC, 0xDD, 0xEE, 0xFF)
def __init__(self, seed, row, cache_size):
self.cache_size = cache_size
self.fmt = '0%dx' % (2 * self.cache_size)
self.row = row
self.cache = None
self.index = 0
# we use 10 (keys) + 6 (filler) random bytes per record
self.skip_ahead(16 * row)
random.seed(seed)
def update_cache(self):
r = getrandbits(8 * self.cache_size)
self.cache = format(r, self.fmt).encode('ascii')
def skip_ahead(self, skip):
"""\
Skip ahead skip random bytes
"""
chunks = skip // self.cache_size
cache_size_bits = 8 * self.cache_size
for _ in range(chunks):
getrandbits(cache_size_bits)
self.update_cache()
self.index = 2 * (skip - chunks * self.cache_size)
def next_random_block(self):
if self.index == 2 * self.cache_size:
self.update_cache()
self.index = 0
s, self.index = self.index, self.index + 32
return self.cache[s:self.index]
def generate_record(self):
# 10 bytes of random
# 2 constant bytes
# 32 bytes record number as an ASCII-encoded 32-digit hexadecimal
# 4 bytes of break data
# 48 bytes of filler based on low 48 bits of random
# 4 bytes of break data
rnd = self.next_random_block()
key = rnd[:10]
low = rnd[-12:]
row_id = format(self.row, '032x').encode('ascii')
filler = bytes(sum(map(list, zip(low, low, low, low)), []))
value = (self.BREAK_BYTES + row_id +
self.DATA_HEAD + filler + self.DATA_TAIL)
self.row = self.row + 1
return key, value
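# Minimal usage sketch for GenSort outside the MapReduce job (illustrative only): each
# generate_record() call consumes one 32-character block of the random cache and yields
# a 10-byte ASCII-hex key plus a 90-byte value.
def _demo_gensort(rows=3):
    gen = GenSort(SEED, 0, CACHE_SIZE)
    return [gen.generate_record() for _ in range(rows)]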
class Mapper(api.Mapper):
def __init__(self, context):
super(Mapper, self).__init__(context)
self.gensort = None
def map(self, context):
if self.gensort is None:
row = struct.unpack('>q', context.key)[0]
self.gensort = GenSort(SEED, row, CACHE_SIZE)
key, value = self.gensort.generate_record()
context.emit(key, value)
factory = pp.Factory(mapper_class=Mapper, record_writer_class=Writer)
def __main__():
pp.run_task(factory, auto_serialize=False)
| apache-2.0 | -846,706,939,941,173,600 | 29.606557 | 77 | 0.631494 | false |
timothycrosley/hug | hug/interface.py | 1 | 40948 | """hug/interface.py
Defines the various interface hug provides to expose routes to functions
Copyright (C) 2016 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import argparse
import asyncio
import os
import sys
from collections import OrderedDict
from functools import lru_cache, partial, wraps
import falcon
from falcon import HTTP_BAD_REQUEST
import hug._empty as empty
import hug.api
import hug.output_format
import hug.types as types
from hug import introspect
from hug.exceptions import InvalidTypeData
from hug.format import parse_content_type
from hug.types import (
MarshmallowInputSchema,
MarshmallowReturnSchema,
Multiple,
OneOf,
SmartBoolean,
Text,
text,
)
DOC_TYPE_MAP = {str: "String", bool: "Boolean", list: "Multiple", int: "Integer", float: "Float"}
def _doc(kind):
return DOC_TYPE_MAP.get(kind, kind.__doc__)
def asyncio_call(function, *args, **kwargs):
loop = asyncio.get_event_loop()
if loop.is_running():
return function(*args, **kwargs)
function = asyncio.ensure_future(function(*args, **kwargs), loop=loop)
loop.run_until_complete(function)
return function.result()
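# A minimal sketch of asyncio_call driving a coroutine to completion synchronously; the
# coroutine and wrapper below are illustrative and not part of hug's public API:
async def _demo_add(a, b):
    return a + b
def _demo_asyncio_call():
    return asyncio_call(_demo_add, 1, 2)  # -> 3 when no event loop is already running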
class Interfaces(object):
"""Defines the per-function singleton applied to hugged functions defining common data needed by all interfaces"""
def __init__(self, function, args=None):
self.api = hug.api.from_object(function)
self.spec = getattr(function, "original", function)
self.arguments = introspect.arguments(function)
self.name = introspect.name(function)
self._function = function
self.is_coroutine = introspect.is_coroutine(self.spec)
if self.is_coroutine:
self.spec = getattr(self.spec, "__wrapped__", self.spec)
self.takes_args = introspect.takes_args(self.spec)
self.takes_kwargs = introspect.takes_kwargs(self.spec)
self.parameters = list(introspect.arguments(self.spec, self.takes_kwargs + self.takes_args))
if self.takes_kwargs:
self.kwarg = self.parameters.pop(-1)
if self.takes_args:
self.arg = self.parameters.pop(-1)
self.parameters = tuple(self.parameters)
self.defaults = dict(zip(reversed(self.parameters), reversed(self.spec.__defaults__ or ())))
self.required = self.parameters[: -(len(self.spec.__defaults__ or ())) or None]
self.is_method = introspect.is_method(self.spec) or introspect.is_method(function)
if self.is_method:
self.required = self.required[1:]
self.parameters = self.parameters[1:]
self.all_parameters = set(self.parameters)
if self.spec is not function:
self.all_parameters.update(self.arguments)
if args is not None:
transformers = args
else:
transformers = self.spec.__annotations__
self.transform = transformers.get("return", None)
self.directives = {}
self.input_transformations = {}
for name, transformer in transformers.items():
if isinstance(transformer, str):
continue
elif hasattr(transformer, "directive"):
self.directives[name] = transformer
continue
if hasattr(transformer, "from_string"):
transformer = transformer.from_string
elif hasattr(transformer, "load"):
transformer = MarshmallowInputSchema(transformer)
elif hasattr(transformer, "deserialize"):
transformer = transformer.deserialize
self.input_transformations[name] = transformer
def __call__(__hug_internal_self, *args, **kwargs): # noqa: N805
""""Calls the wrapped function, uses __hug_internal_self incase self is passed in as a kwarg from the wrapper"""
if not __hug_internal_self.is_coroutine:
return __hug_internal_self._function(*args, **kwargs)
return asyncio_call(__hug_internal_self._function, *args, **kwargs)
class Interface(object):
"""Defines the basic hug interface object, which is responsible for wrapping a user defined function and providing
all the info requested in the function as well as the route
    An Interface object should be created for every kind of protocol hug supports
"""
__slots__ = (
"interface",
"_api",
"defaults",
"parameters",
"required",
"_outputs",
"on_invalid",
"requires",
"validate_function",
"transform",
"examples",
"output_doc",
"wrapped",
"directives",
"all_parameters",
"raise_on_invalid",
"invalid_outputs",
"map_params",
"input_transformations",
)
def __init__(self, route, function):
if route.get("api", None):
self._api = route["api"]
if "examples" in route:
self.examples = route["examples"]
function_args = route.get("args")
if not hasattr(function, "interface"):
function.__dict__["interface"] = Interfaces(function, function_args)
self.interface = function.interface
self.requires = route.get("requires", ())
if "validate" in route:
self.validate_function = route["validate"]
if "output_invalid" in route:
self.invalid_outputs = route["output_invalid"]
if not "parameters" in route:
self.defaults = self.interface.defaults
self.parameters = self.interface.parameters
self.all_parameters = self.interface.all_parameters
self.required = self.interface.required
else:
self.defaults = route.get("defaults", {})
self.parameters = tuple(route["parameters"])
self.all_parameters = set(route["parameters"])
self.required = tuple(
[parameter for parameter in self.parameters if parameter not in self.defaults]
)
if "map_params" in route:
self.map_params = route["map_params"]
for interface_name, internal_name in self.map_params.items():
if internal_name in self.defaults:
self.defaults[interface_name] = self.defaults.pop(internal_name)
if internal_name in self.parameters:
self.parameters = [
interface_name if param == internal_name else param
for param in self.parameters
]
if internal_name in self.all_parameters:
self.all_parameters.remove(internal_name)
self.all_parameters.add(interface_name)
if internal_name in self.required:
self.required = tuple(
[
interface_name if param == internal_name else param
for param in self.required
]
)
reverse_mapping = {
internal: interface for interface, internal in self.map_params.items()
}
self.input_transformations = {
reverse_mapping.get(name, name): transform
for name, transform in self.interface.input_transformations.items()
}
else:
self.map_params = {}
self.input_transformations = self.interface.input_transformations
if "output" in route:
self.outputs = route["output"]
self.transform = route.get("transform", None)
if self.transform is None and not isinstance(self.interface.transform, (str, type(None))):
self.transform = self.interface.transform
if hasattr(self.transform, "dump"):
self.transform = MarshmallowReturnSchema(self.transform)
self.output_doc = self.transform.__doc__
elif self.transform or self.interface.transform:
output_doc = self.transform or self.interface.transform
self.output_doc = output_doc if type(output_doc) is str else _doc(output_doc)
self.raise_on_invalid = route.get("raise_on_invalid", False)
if "on_invalid" in route:
self.on_invalid = route["on_invalid"]
elif self.transform:
self.on_invalid = self.transform
defined_directives = self.api.directives()
used_directives = set(self.parameters).intersection(defined_directives)
self.directives = {
directive_name: defined_directives[directive_name] for directive_name in used_directives
}
self.directives.update(self.interface.directives)
@property
def api(self):
return getattr(self, "_api", self.interface.api)
@property
def outputs(self):
return getattr(self, "_outputs", None)
@outputs.setter
def outputs(self, outputs):
self._outputs = outputs # pragma: no cover - generally re-implemented by sub classes
def validate(self, input_parameters, context):
"""Runs all set type transformers / validators against the provided input parameters and returns any errors"""
errors = {}
for key, type_handler in self.input_transformations.items():
if self.raise_on_invalid:
if key in input_parameters:
input_parameters[key] = self.initialize_handler(
type_handler, input_parameters[key], context=context
)
else:
try:
if key in input_parameters:
input_parameters[key] = self.initialize_handler(
type_handler, input_parameters[key], context=context
)
except InvalidTypeData as error:
errors[key] = error.reasons or str(error)
except Exception as error:
if hasattr(error, "args") and error.args:
errors[key] = error.args[0]
else:
errors[key] = str(error)
for require in self.required:
            if require not in input_parameters:
errors[require] = "Required parameter '{}' not supplied".format(require)
if not errors and getattr(self, "validate_function", False):
errors = self.validate_function(input_parameters)
return errors
def check_requirements(self, request=None, response=None, context=None):
"""Checks to see if all requirements set pass
if all requirements pass nothing will be returned
otherwise, the error reported will be returned
"""
for requirement in self.requires:
conclusion = requirement(
response=response, request=request, context=context, module=self.api.module
)
if conclusion and conclusion is not True:
return conclusion
def documentation(self, add_to=None):
"""Produces general documentation for the interface"""
doc = OrderedDict() if add_to is None else add_to
usage = self.interface.spec.__doc__
if usage:
doc["usage"] = usage
if getattr(self, "requires", None):
doc["requires"] = [
getattr(requirement, "__doc__", requirement.__name__)
for requirement in self.requires
]
doc["outputs"] = OrderedDict()
doc["outputs"]["format"] = _doc(self.outputs)
doc["outputs"]["content_type"] = self.outputs.content_type
parameters = [
param
for param in self.parameters
if not param in ("request", "response", "self")
and not param in ("api_version", "body")
and not param.startswith("hug_")
and not hasattr(param, "directive")
]
if parameters:
inputs = doc.setdefault("inputs", OrderedDict())
types = self.interface.spec.__annotations__
for argument in parameters:
kind = types.get(self._remap_entry(argument), text)
if getattr(kind, "directive", None) is True:
continue
input_definition = inputs.setdefault(argument, OrderedDict())
input_definition["type"] = kind if isinstance(kind, str) else _doc(kind)
default = self.defaults.get(argument, None)
if default is not None:
input_definition["default"] = default
return doc
def _rewrite_params(self, params):
for interface_name, internal_name in self.map_params.items():
if interface_name in params:
params[internal_name] = params.pop(interface_name)
def _remap_entry(self, interface_name):
return self.map_params.get(interface_name, interface_name)
@staticmethod
def cleanup_parameters(parameters, exception=None):
for _parameter, directive in parameters.items():
if hasattr(directive, "cleanup"):
directive.cleanup(exception=exception)
@staticmethod
def initialize_handler(handler, value, context):
try: # It's easier to ask for forgiveness than for permission
return handler(value, context=context)
except TypeError:
return handler(value)
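# initialize_handler() first tries handler(value, context=context) and falls back to
# handler(value) when the handler does not accept a context.  A standalone sketch of
# that convention with illustrative handlers:
def _demo_initialize_handler():
    def plain(value):
        return int(value)
    def contextual(value, context=None):
        return int(value) + (context or 0)
    return (
        Interface.initialize_handler(plain, "1", context=5),       # -> 1
        Interface.initialize_handler(contextual, "1", context=5),  # -> 6
    )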
class Local(Interface):
"""Defines the Interface responsible for exposing functions locally"""
__slots__ = ("skip_directives", "skip_validation", "version")
def __init__(self, route, function):
super().__init__(route, function)
self.version = route.get("version", None)
if "skip_directives" in route:
self.skip_directives = True
if "skip_validation" in route:
self.skip_validation = True
self.interface.local = self
def __get__(self, instance, kind):
"""Support instance methods"""
return partial(self.__call__, instance) if instance else self.__call__
@property
def __name__(self):
return self.interface.spec.__name__
@property
def __module__(self):
return self.interface.spec.__module__
    def __call__(self, *args, **kwargs):
        """Defines how calling the function locally should be handled"""
        context = self.api.context_factory(api=self.api, api_version=self.version, interface=self)
for _requirement in self.requires:
lacks_requirement = self.check_requirements(context=context)
if lacks_requirement:
self.api.delete_context(context, lacks_requirement=lacks_requirement)
return self.outputs(lacks_requirement) if self.outputs else lacks_requirement
for index, argument in enumerate(args):
kwargs[self.parameters[index]] = argument
if not getattr(self, "skip_directives", False):
for parameter, directive in self.directives.items():
if parameter in kwargs:
continue
arguments = (self.defaults[parameter],) if parameter in self.defaults else ()
kwargs[parameter] = directive(
*arguments,
api=self.api,
api_version=self.version,
interface=self,
context=context
)
if not getattr(self, "skip_validation", False):
errors = self.validate(kwargs, context)
if errors:
errors = {"errors": errors}
if getattr(self, "on_invalid", False):
errors = self.on_invalid(errors)
outputs = getattr(self, "invalid_outputs", self.outputs)
self.api.delete_context(context, errors=errors)
return outputs(errors) if outputs else errors
self._rewrite_params(kwargs)
try:
result = self.interface(**kwargs)
if self.transform:
if hasattr(self.transform, "context"):
self.transform.context = context
result = self.transform(result)
except Exception as exception:
self.cleanup_parameters(kwargs, exception=exception)
self.api.delete_context(context, exception=exception)
raise exception
self.cleanup_parameters(kwargs)
self.api.delete_context(context)
return self.outputs(result) if self.outputs else result
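# Local is the class behind hug's local interface.  A minimal sketch of the public API
# it backs (hug.local); hug is imported lazily here only to avoid a circular import,
# and the function below is illustrative:
def _demo_local_route():
    import hug
    @hug.local()
    def multiply(number_1: hug.types.number, number_2: hug.types.number):
        return number_1 * number_2
    return multiply("2", "3")  # inputs are coerced by hug.types.number, so this should return 6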
class CLI(Interface):
"""Defines the Interface responsible for exposing functions to the CLI"""
def __init__(self, route, function):
super().__init__(route, function)
if not self.outputs:
self.outputs = self.api.cli.output_format
self.interface.cli = self
self.reaffirm_types = {}
use_parameters = list(self.interface.parameters)
self.additional_options = getattr(
self.interface, "arg", getattr(self.interface, "kwarg", False)
)
if self.additional_options:
use_parameters.append(self.additional_options)
used_options = {"h", "help"}
nargs_set = self.interface.takes_args or self.interface.takes_kwargs
class CustomArgumentParser(argparse.ArgumentParser):
exit_callback = None
def exit(self, status=0, message=None):
if self.exit_callback:
self.exit_callback(message)
super().exit(status, message)
self.parser = CustomArgumentParser(
description=route.get("doc", self.interface.spec.__doc__)
)
if "version" in route:
self.parser.add_argument(
"-v",
"--version",
action="version",
version="{0} {1}".format(
route.get("name", self.interface.spec.__name__), route["version"]
),
)
used_options.update(("v", "version"))
self.context_tranforms = []
for option in use_parameters:
if option in self.directives:
continue
if option in self.interface.required or option == self.additional_options:
args = (option,)
else:
short_option = option[0]
while short_option in used_options and len(short_option) < len(option):
short_option = option[: len(short_option) + 1]
used_options.add(short_option)
used_options.add(option)
if short_option != option:
args = ("-{0}".format(short_option), "--{0}".format(option))
else:
args = ("--{0}".format(option),)
kwargs = {}
if option in self.defaults:
kwargs["default"] = self.defaults[option]
if option in self.interface.input_transformations:
transform = self.interface.input_transformations[option]
kwargs["type"] = transform
kwargs["help"] = _doc(transform)
if transform in (list, tuple) or isinstance(transform, types.Multiple):
kwargs["action"] = "append"
kwargs["type"] = Text()
self.reaffirm_types[option] = transform
elif transform == bool or isinstance(transform, type(types.boolean)):
kwargs["action"] = "store_true"
self.reaffirm_types[option] = transform
elif isinstance(transform, types.OneOf):
kwargs["choices"] = transform.values
elif (
option in self.interface.spec.__annotations__
and type(self.interface.spec.__annotations__[option]) == str
):
kwargs["help"] = option
if (
kwargs.get("type", None) == bool or kwargs.get("action", None) == "store_true"
) and not kwargs["default"]:
kwargs["action"] = "store_true"
kwargs.pop("type", None)
elif kwargs.get("action", None) == "store_true":
kwargs.pop("action", None)
if option == self.additional_options:
kwargs["nargs"] = "*"
elif (
not nargs_set
and kwargs.get("action", None) == "append"
                and option not in self.interface.defaults
):
kwargs["nargs"] = "*"
kwargs.pop("action", "")
nargs_set = True
self.parser.add_argument(*args, **kwargs)
self.api.cli.commands[route.get("name", self.interface.spec.__name__)] = self
def output(self, data, context):
"""Outputs the provided data using the transformations and output format specified for this CLI endpoint"""
if self.transform:
if hasattr(self.transform, "context"):
self.transform.context = context
data = self.transform(data)
if hasattr(data, "read"):
data = data.read().decode("utf8")
if data is not None:
data = self.outputs(data)
if data:
sys.stdout.buffer.write(data)
if not data.endswith(b"\n"):
sys.stdout.buffer.write(b"\n")
return data
def __str__(self):
return self.parser.description or ""
def __call__(self):
"""Calls the wrapped function through the lens of a CLI ran command"""
context = self.api.context_factory(api=self.api, argparse=self.parser, interface=self)
def exit_callback(message):
self.api.delete_context(context, errors=message)
self.parser.exit_callback = exit_callback
self.api._ensure_started()
for requirement in self.requires:
conclusion = requirement(request=sys.argv, module=self.api.module, context=context)
if conclusion and conclusion is not True:
self.api.delete_context(context, lacks_requirement=conclusion)
return self.output(conclusion, context)
if self.interface.is_method:
self.parser.prog = "%s %s" % (self.api.module.__name__, self.interface.name)
known, unknown = self.parser.parse_known_args()
pass_to_function = vars(known)
for option, directive in self.directives.items():
arguments = (self.defaults[option],) if option in self.defaults else ()
pass_to_function[option] = directive(
*arguments, api=self.api, argparse=self.parser, context=context, interface=self
)
for field, type_handler in self.reaffirm_types.items():
if field in pass_to_function:
if not pass_to_function[field] and type_handler in (
list,
tuple,
hug.types.Multiple,
):
pass_to_function[field] = type_handler(())
else:
pass_to_function[field] = self.initialize_handler(
type_handler, pass_to_function[field], context=context
)
if getattr(self, "validate_function", False):
errors = self.validate_function(pass_to_function)
if errors:
self.api.delete_context(context, errors=errors)
return self.output(errors, context)
args = None
if self.additional_options:
args = []
for parameter in self.interface.parameters:
if parameter in pass_to_function:
args.append(pass_to_function.pop(parameter))
args.extend(pass_to_function.pop(self.additional_options, ()))
if self.interface.takes_kwargs:
add_options_to = None
for option in unknown:
if option.startswith("--"):
if add_options_to:
value = pass_to_function[add_options_to]
if len(value) == 1:
pass_to_function[add_options_to] = value[0]
elif value == []:
pass_to_function[add_options_to] = True
add_options_to = option[2:]
pass_to_function.setdefault(add_options_to, [])
elif add_options_to:
pass_to_function[add_options_to].append(option)
self._rewrite_params(pass_to_function)
try:
if args:
result = self.output(self.interface(*args, **pass_to_function), context)
else:
result = self.output(self.interface(**pass_to_function), context)
except Exception as exception:
self.cleanup_parameters(pass_to_function, exception=exception)
self.api.delete_context(context, exception=exception)
raise exception
self.cleanup_parameters(pass_to_function)
self.api.delete_context(context)
return result
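# CLI is the class behind hug's command-line interface.  A minimal sketch of the public
# API it backs (hug.cli); hug is imported lazily to avoid a circular import and the
# command below is illustrative:
def _demo_cli_route():
    import hug
    @hug.cli()
    def greet(name: hug.types.text, shout: hug.types.smart_boolean = False):
        """Greets the given name."""
        message = "Hello, {0}!".format(name)
        return message.upper() if shout else message
    return greet  # `greet.interface.cli()` would parse sys.argv and print the result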
class HTTP(Interface):
"""Defines the interface responsible for wrapping functions and exposing them via HTTP based on the route"""
__slots__ = (
"_params_for_outputs_state",
"_params_for_invalid_outputs_state",
"_params_for_transform_state",
"_params_for_on_invalid",
"set_status",
"response_headers",
"transform",
"input_transformations",
"examples",
"wrapped",
"catch_exceptions",
"parse_body",
"private",
"on_invalid",
"inputs",
)
AUTO_INCLUDE = {"request", "response"}
def __init__(self, route, function, catch_exceptions=True):
super().__init__(route, function)
self.catch_exceptions = catch_exceptions
self.parse_body = "parse_body" in route
self.set_status = route.get("status", False)
self.response_headers = tuple(route.get("response_headers", {}).items())
self.private = "private" in route
self.inputs = route.get("inputs", {})
if "on_invalid" in route:
self._params_for_on_invalid = introspect.takes_arguments(
self.on_invalid, *self.AUTO_INCLUDE
)
elif self.transform:
self._params_for_on_invalid = self._params_for_transform
self.api.http.versions.update(route.get("versions", (None,)))
self.interface.http = self
@property
def _params_for_outputs(self):
if not hasattr(self, "_params_for_outputs_state"):
self._params_for_outputs_state = introspect.takes_arguments(
self.outputs, *self.AUTO_INCLUDE
)
return self._params_for_outputs_state
@property
def _params_for_invalid_outputs(self):
if not hasattr(self, "_params_for_invalid_outputs_state"):
self._params_for_invalid_outputs_state = introspect.takes_arguments(
self.invalid_outputs, *self.AUTO_INCLUDE
)
return self._params_for_invalid_outputs_state
@property
def _params_for_transform(self):
if not hasattr(self, "_params_for_transform_state"):
self._params_for_transform_state = introspect.takes_arguments(
self.transform, *self.AUTO_INCLUDE
)
return self._params_for_transform_state
def gather_parameters(self, request, response, context, api_version=None, **input_parameters):
"""Gathers and returns all parameters that will be used for this endpoint"""
input_parameters.update(request.params)
if self.parse_body and request.content_length:
body = request.bounded_stream
content_type, content_params = parse_content_type(request.content_type)
body_formatter = body and self.inputs.get(
content_type, self.api.http.input_format(content_type)
)
if body_formatter:
body = body_formatter(body, content_length=request.content_length, **content_params)
if "body" in self.all_parameters:
input_parameters["body"] = body
if isinstance(body, dict):
input_parameters.update(body)
elif "body" in self.all_parameters:
input_parameters["body"] = None
if "request" in self.all_parameters:
input_parameters["request"] = request
if "response" in self.all_parameters:
input_parameters["response"] = response
if "api_version" in self.all_parameters:
input_parameters["api_version"] = api_version
for parameter, directive in self.directives.items():
arguments = (self.defaults[parameter],) if parameter in self.defaults else ()
input_parameters[parameter] = directive(
*arguments,
response=response,
request=request,
api=self.api,
api_version=api_version,
context=context,
interface=self
)
return input_parameters
@property
def outputs(self):
return getattr(self, "_outputs", self.api.http.output_format)
@outputs.setter
def outputs(self, outputs):
self._outputs = outputs
    def transform_data(self, data, request=None, response=None, context=None):
        """Runs the transforms specified on this endpoint with the provided data, returning the data modified"""
        transform = self.transform
        if hasattr(transform, "context"):
            self.transform.context = context
if transform and not (isinstance(transform, type) and isinstance(data, transform)):
if self._params_for_transform:
return transform(
data, **self._arguments(self._params_for_transform, request, response)
)
else:
return transform(data)
return data
def content_type(self, request=None, response=None):
"""Returns the content type that should be used by default for this endpoint"""
if callable(self.outputs.content_type):
return self.outputs.content_type(request=request, response=response)
else:
return self.outputs.content_type
def invalid_content_type(self, request=None, response=None):
"""Returns the content type that should be used by default on validation errors"""
if callable(self.invalid_outputs.content_type):
return self.invalid_outputs.content_type(request=request, response=response)
else:
return self.invalid_outputs.content_type
def _arguments(self, requested_params, request=None, response=None):
if requested_params:
arguments = {}
if "response" in requested_params:
arguments["response"] = response
if "request" in requested_params:
arguments["request"] = request
return arguments
return empty.dict
def set_response_defaults(self, response, request=None):
"""Sets up the response defaults that are defined in the URL route"""
for header_name, header_value in self.response_headers:
response.set_header(header_name, header_value)
if self.set_status:
response.status = self.set_status
response.content_type = self.content_type(request, response)
def render_errors(self, errors, request, response):
data = {"errors": errors}
if getattr(self, "on_invalid", False):
data = self.on_invalid(
data, **self._arguments(self._params_for_on_invalid, request, response)
)
response.status = HTTP_BAD_REQUEST
if getattr(self, "invalid_outputs", False):
response.content_type = self.invalid_content_type(request, response)
response.data = self.invalid_outputs(
data, **self._arguments(self._params_for_invalid_outputs, request, response)
)
else:
response.data = self.outputs(
data, **self._arguments(self._params_for_outputs, request, response)
)
def call_function(self, parameters):
if not self.interface.takes_kwargs:
parameters = {
key: value for key, value in parameters.items() if key in self.all_parameters
}
self._rewrite_params(parameters)
return self.interface(**parameters)
def render_content(self, content, context, request, response, **kwargs):
if hasattr(content, "interface") and (
content.interface is True or hasattr(content.interface, "http")
):
if content.interface is True:
content(request, response, api_version=None, **kwargs)
else:
content.interface.http(request, response, api_version=None, **kwargs)
return
content = self.transform_data(content, request, response, context)
content = self.outputs(
content, **self._arguments(self._params_for_outputs, request, response)
)
if hasattr(content, "read"):
size = None
if hasattr(content, "name") and os.path.isfile(content.name):
size = os.path.getsize(content.name)
if request.range and size:
start, end = request.range
if end < 0:
end = size + end
end = min(end, size)
length = end - start + 1
content.seek(start)
response.data = content.read(length)
response.status = falcon.HTTP_206
response.content_range = (start, end, size)
content.close()
else:
if size:
response.set_stream(content, size)
else:
response.stream = content # pragma: no cover
else:
response.data = content
    def __call__(self, request, response, api_version=None, **kwargs):
        """Call the wrapped function over HTTP pulling information as needed"""
        context = self.api.context_factory(
            response=response,
            request=request,
            api=self.api,
            api_version=api_version,
            interface=self,
        )
if isinstance(api_version, str) and api_version.isdigit():
api_version = int(api_version)
else:
api_version = None
if not self.catch_exceptions:
exception_types = ()
else:
exception_types = self.api.http.exception_handlers(api_version)
exception_types = tuple(exception_types.keys()) if exception_types else ()
input_parameters = {}
try:
self.set_response_defaults(response, request)
lacks_requirement = self.check_requirements(request, response, context)
if lacks_requirement:
response.data = self.outputs(
lacks_requirement,
**self._arguments(self._params_for_outputs, request, response)
)
self.api.delete_context(context, lacks_requirement=lacks_requirement)
return
input_parameters = self.gather_parameters(
request, response, context, api_version, **kwargs
)
errors = self.validate(input_parameters, context)
if errors:
self.api.delete_context(context, errors=errors)
return self.render_errors(errors, request, response)
self.render_content(
self.call_function(input_parameters), context, request, response, **kwargs
)
except falcon.HTTPNotFound as exception:
self.cleanup_parameters(input_parameters, exception=exception)
self.api.delete_context(context, exception=exception)
return self.api.http.not_found(request, response, **kwargs)
except exception_types as exception:
self.cleanup_parameters(input_parameters, exception=exception)
self.api.delete_context(context, exception=exception)
handler = None
exception_type = type(exception)
if exception_type in exception_types:
handler = self.api.http.exception_handlers(api_version)[exception_type][0]
else:
for match_exception_type, exception_handlers in tuple(
self.api.http.exception_handlers(api_version).items()
)[::-1]:
if isinstance(exception, match_exception_type):
for potential_handler in exception_handlers:
if not isinstance(exception, potential_handler.exclude):
handler = potential_handler
if not handler:
raise exception
handler(request=request, response=response, exception=exception, **kwargs)
except Exception as exception:
self.cleanup_parameters(input_parameters, exception=exception)
self.api.delete_context(context, exception=exception)
raise exception
self.cleanup_parameters(input_parameters)
self.api.delete_context(context)
def documentation(self, add_to=None, version=None, prefix="", base_url="", url=""):
"""Returns the documentation specific to an HTTP interface"""
doc = OrderedDict() if add_to is None else add_to
usage = self.interface.spec.__doc__
if usage:
doc["usage"] = usage
for example in self.examples:
example_text = "{0}{1}{2}{3}".format(
prefix, base_url, "/v{0}".format(version) if version else "", url
)
if isinstance(example, str):
example_text += "?{0}".format(example)
doc_examples = doc.setdefault("examples", [])
            if example_text not in doc_examples:
doc_examples.append(example_text)
doc = super().documentation(doc)
if getattr(self, "output_doc", ""):
doc["outputs"]["type"] = self.output_doc
return doc
@lru_cache()
def urls(self, version=None):
"""Returns all URLS that are mapped to this interface"""
urls = []
for _base_url, routes in self.api.http.routes.items():
for url, methods in routes.items():
for _method, versions in methods.items():
for interface_version, interface in versions.items():
if interface_version == version and interface == self:
                            if url not in urls:
urls.append(("/v{0}".format(version) if version else "") + url)
return urls
def url(self, version=None, **kwargs):
"""Returns the first matching URL found for the specified arguments"""
for url in self.urls(version):
            if [key for key in kwargs.keys() if "{" + key + "}" not in url]:
continue
return url.format(**kwargs)
raise KeyError("URL that takes all provided parameters not found")
class ExceptionRaised(HTTP):
"""Defines the interface responsible for taking and transforming exceptions that occur during processing"""
__slots__ = ("handle", "exclude")
def __init__(self, route, *args, **kwargs):
self.handle = route["exceptions"]
self.exclude = route["exclude"]
super().__init__(route, *args, **kwargs)
| mit | 2,164,024,948,178,600,700 | 39.623016 | 120 | 0.582275 | false |
jmbowman/media_library | settings.py | 1 | 3402 | # Django settings for app project.
import os
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
# Use the new support for timezones in Django 1.4
USE_TZ = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.dirname(__file__) + '/files/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/files/'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/library/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'urls'
# URL for static media files
STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.request',
'django.core.context_processors.static',
)
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.dirname(__file__) + "/templates",
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'uni_form',
'library',
'debug_toolbar',
)
# Directories outside of individual apps in which static files are found
STATICFILES_DIRS = (
os.path.dirname(__file__) + '/static/',
)
# destination path for static media files on file server
STATIC_ROOT = '/local/static/library/static/'
# Needed for django-debug-toolbar
INTERNAL_IPS = ('127.0.0.1',)
# Finally, grab local settings from your local settings
try:
from local_settings import *
except ImportError:
pass
| mit | -8,985,655,191,693,617,000 | 29.927273 | 88 | 0.727513 | false |
micha-shepher/oervoer-wizard | oervoer-django/oervoer/src/oervoer/settings.py | 1 | 2765 | """
Django settings for oervoer project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
bojan: 06 21543084
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 're78rq!(q%1zvygez@83+9wu+$ew$!hy(v&)4_wkctte-qhyhe'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_tables2',
'wizard',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'oervoer.urls'
WSGI_APPLICATION = 'oervoer.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'oervoer.db'),
}
}
#TEMPLATE_CONTEXT_PROCESSORS =
#(
# 'django.core.context_processors.request',
#)
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
tcp = list(TEMPLATE_CONTEXT_PROCESSORS)
tcp.append('django.core.context_processors.request')
TEMPLATE_CONTEXT_PROCESSORS = tuple(tcp)
#TEMPLATE_CONTEXT_PROCESSORS =("django.contrib.auth.context_processors.auth",
#"django.core.context_processors.debug",
#"django.core.context_processors.i18n",
#"django.core.context_processors.media",
#"django.core.context_processors.static",
#"django.core.context_processors.tz",
#"django.contrib.messages.context_processors.messages",
#'django.core.context_processors.request',)
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 | -1,681,099,818,332,427,000 | 26.376238 | 77 | 0.739241 | false |
jamescallmebrent/dagny | src/dagny/renderer.py | 1 | 9212 | # -*- coding: utf-8 -*-
from functools import wraps
import odict
from dagny import conneg
class Skip(Exception):
"""
Move on to the next renderer backend.
This exception can be raised by a renderer backend to instruct the
`Renderer` to ignore the current backend and move on to the next-best one.
"""
class Renderer(object):
"""
Manage a collection of renderer backends, and their execution on an action.
A renderer backend is a callable which accepts an `Action` and a `Resource`
and returns an instance of `django.http.HttpResponse`. For example:
>>> def render_html(action, resource):
... from django.http import HttpResponse
... return HttpResponse(content="<html>...</html>")
Backends are associated with mimetypes on the `Renderer`, through mimetype
shortcodes (see `dagny.conneg` for more information on shortcodes). The
`Renderer` exports a dictionary-like interface for managing these
associations:
>>> r = Renderer()
>>> r['html'] = render_html
>>> r['html'] # doctest: +ELLIPSIS
<function render_html at 0x...>
>>> 'html' in r
True
>>> del r['html']
>>> r['html']
Traceback (most recent call last):
...
KeyError: 'html'
>>> 'html' in r
False
A few helpful dictionary methods have also been added, albeit
underscore-prefixed to prevent naming clashes. Behind the scenes, `Renderer`
uses [odict](http://pypi.python.org/pypi/odict), which will keep the keys in
the order they were *first* defined. Here are a few examples:
>>> r['html'] = 1
>>> r['json'] = 2
>>> r['xml'] = 3
>>> r._keys()
['html', 'json', 'xml']
>>> r._items()
[('html', 1), ('json', 2), ('xml', 3)]
>>> r._values()
[1, 2, 3]
This order preservation is useful for ConNeg, since you can define backends
in order of server preference and the negotiator will consider them
appropriately. You can push something to the end of the queue by removing
and then re-adding it:
>>> r['html'] = r._pop('html')
>>> r._keys()
['json', 'xml', 'html']
You can also define backends using a handy decorator-based syntax:
>>> @r.html
... def render_html_2(action, resource):
... from django.http import HttpResponse
... return HttpResponse(content="<html>...</html>")
>>> r['html'] is render_html_2
True
Remember that your shortcode *must* be pre-registered with
`dagny.conneg.MIMETYPES` for this to work, otherwise an `AttributeError`
will be raised. This also introduces the constraint that your shortcode must
be a valid Python identifier.
"""
def __init__(self, backends=None):
if backends is None:
backends = odict.odict()
else:
backends = backends.copy()
self._backends = backends
def __getattr__(self, shortcode):
"""
Support use of decorator syntax to define new renderer backends.
>>> r = Renderer()
>>> @r.html
... def render_html(action, resource):
... return "<html>...</html>"
>>> render_html # doctest: +ELLIPSIS
<function render_html at 0x...>
>>> r['html'] # doctest: +ELLIPSIS
<function render_html at 0x...>
>>> r['html'] is render_html
True
"""
if shortcode not in conneg.MIMETYPES:
raise AttributeError(shortcode)
def decorate(function):
self[shortcode] = function
return function
return decorate
def __call__(self, action, resource):
matches = self._match(action, resource)
for shortcode in matches:
try:
return self[shortcode](action, resource)
except Skip:
continue
return not_acceptable(action, resource)
def _match(self, action, resource):
"""Return all matching shortcodes for a given action and resource."""
matches = []
format_override = resource._format()
if format_override and (format_override in self._keys()):
matches.append(format_override)
accept_header = resource.request.META.get('HTTP_ACCEPT')
if accept_header:
matches.extend(conneg.match_accept(accept_header, self._keys()))
if (not matches) and ('html' in self):
matches.append('html')
return matches
def _bind(self, action):
"""
Bind this `Renderer` to an action, returning a `BoundRenderer`.
>>> r = Renderer()
>>> action = object()
>>> r['html'] = 1
>>> br = r._bind(action)
>>> br # doctest: +ELLIPSIS
<BoundRenderer on <object object at 0x...>>
Associations should be preserved, albeit on a copied `odict`, so that
modifications to the `BoundRenderer` do not propagate back to this.
>>> br['html']
1
>>> br['html'] = 2
>>> br['html']
2
>>> r['html']
1
>>> r['html'] = 3
>>> r['html']
3
>>> br['html']
2
"""
return BoundRenderer(action, backends=self._backends)
def _copy(self):
return type(self)(backends=self._backends)
### <meta>
#
# This chunk of code creates several proxy methods going through to
# `_backends`. A group of them are underscore-prefixed to prevent naming
# clashes with the `__getattr__`-based decorator syntax (so you could
# still associate a backend with a shortcode of 'pop', for example).
proxy = lambda meth: property(lambda self: getattr(self._backends, meth))
for method in ('__contains__', '__getitem__', '__setitem__', '__delitem__'):
vars()[method] = proxy(method)
for method in ('clear', 'get', 'items', 'iteritems', 'iterkeys',
'itervalues', 'keys', 'pop', 'popitem', 'ritems',
'riteritems', 'riterkeys', 'ritervalues', 'rkeys', 'rvalues',
'setdefault', 'sort', 'update', 'values'):
vars()['_' + method] = proxy(method)
_dict = proxy('as_dict')
del method, proxy
#
### </meta>
class BoundRenderer(Renderer):
def __init__(self, action, backends=None):
super(BoundRenderer, self).__init__(backends=backends)
self._action = action
def __repr__(self):
return "<BoundRenderer on %r>" % (self._action,)
def __getattr__(self, shortcode):
"""
Support use of decorator syntax to define new renderer backends.
In this case, decorated functions should be methods which operate on a
resource, and take no other arguments.
>>> action = object()
>>> r = BoundRenderer(action)
>>> old_action_id = id(action)
>>> @r.html
... def action(resource):
... return "<html>...</html>"
>>> id(action) == old_action_id # Object has not changed.
True
        Functions will be wrapped internally, so that their function signature
        is that of a generic renderer backend. Calling the registered backend
        through the renderer therefore takes the usual ``(action, resource)``
        pair, even though the wrapped method itself only receives the resource:
>>> resource = object()
>>> r['html'](action, resource)
'<html>...</html>'
"""
if shortcode not in conneg.MIMETYPES:
raise AttributeError(shortcode)
def decorate(method):
self[shortcode] = resource_method_wrapper(method)
return self._action
return decorate
def __call__(self, resource):
return super(BoundRenderer, self).__call__(self._action, resource)
def resource_method_wrapper(method):
"""
Wrap a 0-ary resource method as a generic renderer backend.
>>> @resource_method_wrapper
... def func(resource):
... print repr(resource)
>>> action = "abc"
>>> resource = "def"
>>> func(action, resource)
'def'
"""
def generic_renderer_backend(action, resource):
return method(resource)
return generic_renderer_backend
def not_acceptable(action, resource):
"""Respond, indicating that no acceptable entity could be generated."""
from django.http import HttpResponse
response = HttpResponse(status=406) # Not Acceptable
del response['Content-Type']
return response
| unlicense | 6,525,417,132,887,821,000 | 29.006515 | 80 | 0.530504 | false |
gfyoung/pandas | pandas/tests/series/indexing/test_where.py | 1 | 13484 | import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import Series, Timestamp, date_range, isna
import pandas._testing as tm
def test_where_unsafe_int(sint_dtype):
s = Series(np.arange(10), dtype=sint_dtype)
mask = s < 5
s[mask] = range(2, 7)
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype=sint_dtype)
tm.assert_series_equal(s, expected)
def test_where_unsafe_float(float_dtype):
s = Series(np.arange(10), dtype=float_dtype)
mask = s < 5
s[mask] = range(2, 7)
data = list(range(2, 7)) + list(range(5, 10))
expected = Series(data, dtype=float_dtype)
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize(
"dtype,expected_dtype",
[
(np.int8, np.float64),
(np.int16, np.float64),
(np.int32, np.float64),
(np.int64, np.float64),
(np.float32, np.float32),
(np.float64, np.float64),
],
)
def test_where_unsafe_upcast(dtype, expected_dtype):
# see gh-9743
s = Series(np.arange(10), dtype=dtype)
values = [2.5, 3.5, 4.5, 5.5, 6.5]
mask = s < 5
expected = Series(values + list(range(5, 10)), dtype=expected_dtype)
s[mask] = values
tm.assert_series_equal(s, expected)
def test_where_unsafe():
# see gh-9731
s = Series(np.arange(10), dtype="int64")
values = [2.5, 3.5, 4.5, 5.5]
mask = s > 5
expected = Series(list(range(6)) + values, dtype="float64")
s[mask] = values
tm.assert_series_equal(s, expected)
# see gh-3235
s = Series(np.arange(10), dtype="int64")
mask = s < 5
s[mask] = range(2, 7)
expected = Series(list(range(2, 7)) + list(range(5, 10)), dtype="int64")
tm.assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype="int64")
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype="int64")
tm.assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[mask] = [5, 4, 3, 2, 1]
with pytest.raises(ValueError, match=msg):
s[mask] = [0] * 5
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
tm.assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isna(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isna(s)]
expected = Series(np.nan, index=[9])
tm.assert_series_equal(result, expected)
def test_where():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
tm.assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
tm.assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert s.shape == rs.shape
assert rs is not s
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
tm.assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
tm.assert_series_equal(rs, expected)
def test_where_error():
s = Series(np.random.randn(5))
cond = s > 0
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where(1)
with pytest.raises(ValueError, match=msg):
s.where(cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
tm.assert_series_equal(s, expected)
# failures
msg = "cannot assign mismatch length to masked array"
with pytest.raises(ValueError, match=msg):
s[[True, False]] = [0, 2, 3]
msg = (
"NumPy boolean array indexing assignment cannot assign 0 input "
"values to the 1 output values where the mask is true"
)
with pytest.raises(ValueError, match=msg):
s[[True, False]] = []
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where_array_like(klass):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
result = s.where(klass(cond))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"cond",
[
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"), pd.NaT, Timestamp("2017-01-02")],
],
)
def test_where_invalid_input(cond):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
with pytest.raises(ValueError, match=msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with pytest.raises(ValueError, match=msg):
s.where([True])
def test_where_ndframe_align():
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with pytest.raises(ValueError, match=msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid():
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
msg = (
lambda x: f"cannot set using a {x} indexer with a "
"different length than the value"
)
# slice
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:3] = list(range(27))
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
tm.assert_series_equal(s.astype(np.int64), expected)
# slice with step
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[0:4:2] = list(range(27))
s = Series(list("abcdef"))
s[0:4:2] = list(range(2))
expected = Series([0, "b", 1, "d", "e", "f"])
tm.assert_series_equal(s, expected)
# neg slices
s = Series(list("abcdef"))
with pytest.raises(ValueError, match=msg("slice")):
s[:-1] = list(range(27))
s[-3:-1] = list(range(2))
expected = Series(["a", "b", "c", 0, 1, "f"])
tm.assert_series_equal(s, expected)
# list
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(27))
s = Series(list("abc"))
with pytest.raises(ValueError, match=msg("list-like")):
s[[0, 1, 2]] = list(range(2))
# scalar
s = Series(list("abc"))
s[0] = list(range(10))
expected = Series([list(range(10)), "b", "c"])
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("size", range(2, 6))
@pytest.mark.parametrize(
"mask", [[True, False, False, False, False], [True, False], [False]]
)
@pytest.mark.parametrize(
"item", [2.0, np.nan, np.finfo(float).max, np.finfo(float).min]
)
# Test numpy arrays, lists and tuples as the input to be
# broadcast
@pytest.mark.parametrize(
"box", [lambda x: np.array([x]), lambda x: [x], lambda x: (x,)]
)
def test_broadcast(size, mask, item, box):
selection = np.resize(mask, size)
data = np.arange(size, dtype=float)
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series(
[item if use_item else data[i] for i, use_item in enumerate(selection)]
)
s = Series(data)
s[selection] = box(item)
tm.assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, box(item))
tm.assert_series_equal(result, expected)
s = Series(data)
result = s.mask(selection, box(item))
tm.assert_series_equal(result, expected)
def test_where_inplace():
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
tm.assert_series_equal(rs.dropna(), s[cond])
tm.assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
tm.assert_series_equal(rs, s.where(cond, -s))
def test_where_dups():
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
tm.assert_series_equal(comb, expected)
def test_where_numeric_with_string():
# GH 9280
s = Series([1, 2, 3])
w = s.where(s > 1, "X")
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
w = s.where(s > 1, ["X", "Y", "Z"])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
w = s.where(s > 1, np.array(["X", "Y", "Z"]))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == "object"
def test_where_timedelta_coerce():
s = Series([1, 2], dtype="timedelta64[ns]")
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype="object")
tm.assert_series_equal(rs, expected)
def test_where_datetime_conversion():
s = Series(date_range("20130102", periods=2))
expected = Series([10, 10])
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
tm.assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype="object")
tm.assert_series_equal(rs, expected)
# GH 15701
timestamps = ["2016-12-31 12:00:04+00:00", "2016-12-31 12:00:04.010000+00:00"]
s = Series([Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
tm.assert_series_equal(rs, expected)
def test_where_dt_tz_values(tz_naive_fixture):
ser1 = Series(
pd.DatetimeIndex(["20150101", "20150102", "20150103"], tz=tz_naive_fixture)
)
ser2 = Series(
pd.DatetimeIndex(["20160514", "20160515", "20160516"], tz=tz_naive_fixture)
)
mask = Series([True, True, False])
result = ser1.where(mask, ser2)
exp = Series(
pd.DatetimeIndex(["20150101", "20150102", "20160516"], tz=tz_naive_fixture)
)
tm.assert_series_equal(exp, result)
def test_where_sparse():
# GH#17198 make sure we dont get an AttributeError for sp_index
ser = Series(pd.arrays.SparseArray([1, 2]))
result = ser.where(ser >= 2, 0)
expected = Series(pd.arrays.SparseArray([0, 2]))
tm.assert_series_equal(result, expected)
def test_where_empty_series_and_empty_cond_having_non_bool_dtypes():
# https://github.com/pandas-dev/pandas/issues/34592
ser = Series([], dtype=float)
result = ser.where([])
tm.assert_series_equal(result, ser)
@pytest.mark.parametrize("klass", [Series, pd.DataFrame])
def test_where_categorical(klass):
# https://github.com/pandas-dev/pandas/issues/18888
exp = klass(
pd.Categorical(["A", "A", "B", "B", np.nan], categories=["A", "B", "C"]),
dtype="category",
)
df = klass(["A", "A", "B", "B", "C"], dtype="category")
res = df.where(df != "C")
tm.assert_equal(exp, res)
def test_where_datetimelike_categorical(tz_naive_fixture):
# GH#37682
tz = tz_naive_fixture
dr = pd.date_range("2001-01-01", periods=3, tz=tz)._with_freq(None)
lvals = pd.DatetimeIndex([dr[0], dr[1], pd.NaT])
rvals = pd.Categorical([dr[0], pd.NaT, dr[2]])
mask = np.array([True, True, False])
# DatetimeIndex.where
res = lvals.where(mask, rvals)
tm.assert_index_equal(res, dr)
# DatetimeArray.where
res = lvals._data.where(mask, rvals)
tm.assert_datetime_array_equal(res, dr._data)
# Series.where
res = Series(lvals).where(mask, rvals)
tm.assert_series_equal(res, Series(dr))
# DataFrame.where
res = pd.DataFrame(lvals).where(mask[:, None], pd.DataFrame(rvals))
tm.assert_frame_equal(res, pd.DataFrame(dr))
| bsd-3-clause | 6,942,425,362,740,138,000 | 26.295547 | 83 | 0.60175 | false |
ManchesterIO/mollyproject-next | tests/molly/apps/places/test_endpoints.py | 1 | 13228 | import json
from urllib import quote_plus
import urllib2
from flask import Flask
from mock import Mock, ANY, MagicMock
from shapely.geometry import Point
import unittest2 as unittest
from werkzeug.exceptions import NotFound
from molly.apps.places.endpoints import PointOfInterestEndpoint, NearbySearchEndpoint, PointOfInterestSearchEndpoint
from molly.apps.places.models import PointOfInterest
class PointOfInterestEndpointTest(unittest.TestCase):
def setUp(self):
self._poi_service = Mock()
self._endpoint = PointOfInterestEndpoint('testplaces', self._poi_service)
def test_point_of_interest_returns_404_when_url_invalid(self):
self._poi_service.select_by_slug.return_value = None
self.assertRaises(NotFound, self._endpoint.get, 'foo:bar')
def test_point_of_interest_returns_correct_self_when_url_valid(self):
self._poi_service.select_by_slug.return_value = PointOfInterest()
response = self._get_response_json()
self.assertEquals('http://mollyproject.org/apps/places/point-of-interest', response['self'])
def test_href_included_in_response(self):
self._poi_service.select_by_slug.return_value = PointOfInterest()
response = self._get_response_json()
self.assertEquals('http://localhost/poi/foo:bar', response['href'])
def test_poi_serialised_in_response(self):
self._poi_service.select_by_slug.return_value = PointOfInterest(telephone_number='999')
response = self._get_response_json()
self.assertEquals('999', response['poi']['telephone_number'])
def test_poi_has_link_to_nearby_search(self):
self._poi_service.select_by_slug.return_value = PointOfInterest(
location=Point(-26.1, 4.5)
)
response = self._get_response_json()
self.assertEquals('http://localhost/nearby/4.5%2C-26.1/', response['links']['nearby'])
def _get_response_json(self):
app = Flask(__name__)
app.add_url_rule('/poi/<slug>', 'testplaces.poi', self._endpoint.get)
app.add_url_rule('/nearby/<float:lat>,<float:lon>/', 'testplaces.nearby', lambda: None)
with app.test_request_context('/', headers=[('Accept', 'application/json')]):
return json.loads(self._endpoint.get('foo:bar').data)
class NearbySearchEndpointTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self._poi_service = Mock()
self._poi_service.count_nearby_amenity = Mock(return_value=0)
self._poi_service.count_nearby_category = Mock(return_value=0)
self._poi_service.search_nearby_amenity = Mock(return_value=[])
self._poi_service.search_nearby_category = Mock(return_value=[])
self._endpoint = NearbySearchEndpoint('testplaces', self._poi_service)
self._endpoint.interesting_categories = {
'test': 'http://example.com/testcat',
'test2': 'http://example.com/testcat2'
}
self._endpoint.interesting_amenities = {
'testamen': 'http://example.com/testamen',
}
self._endpoint.SEARCH_RADIUS = 123
self.app = Flask(__name__)
self.app.add_url_rule('/nearby/<float:lat>,<float:lon>', 'testplaces.nearby', self._endpoint.get_nearby)
self.app.add_url_rule(
'/nearby/<float:lat>,<float:lon>/category/<slug>', 'testplaces.nearby_category', self._endpoint.get_category
)
self.app.add_url_rule(
'/nearby/<float:lat>,<float:lon>/amenity/<slug>', 'testplaces.nearby_amenity', self._endpoint.get_amenity
)
self.app.add_url_rule('/poi/<slug>', 'testplaces.poi', None)
def _make_categories_request(self, lat, lon):
with self.app.test_request_context('/', headers=[('Accept', 'application/json')]):
response = self._endpoint.get_nearby(lat, lon)
return response
def _make_category_request(self, lat, lon, slug):
with self.app.test_request_context('/', headers=[('Accept', 'application/json')]):
response = self._endpoint.get_category(lat, lon, slug)
return response
def _make_amenity_request(self, lat, lon, slug):
with self.app.test_request_context('/', headers=[('Accept', 'application/json')]):
response = self._endpoint.get_amenity(lat, lon, slug)
return response
def test_overly_precise_requests_are_rounded_down(self):
response = self._make_categories_request(10.123456789, 15.987654321)
self.assertEqual(302, response.status_code)
self.assertEqual(
'http://localhost/nearby/10.12346,15.98765',
urllib2.unquote(dict(response.headers).get('Location'))
)
def test_search_results_are_in_correct_format(self):
self._poi_service.search_nearby.return_value = []
response = self._make_categories_request(54.5, 0.6)
self.assertEqual({
'self': 'http://mollyproject.org/apps/places/categories',
'location_filter': {
'within': 123,
'centre': {"type": "Point", "coordinates": [0.6, 54.5]}
},
'categories': [],
'amenities': []
}, json.loads(response.data))
def test_interesting_pois_are_searched_against(self):
self._make_categories_request(54.5, 0.6)
self._poi_service.count_nearby_category.assert_any_call(ANY, 'http://example.com/testcat', radius=123)
self._poi_service.count_nearby_category.assert_any_call(ANY, 'http://example.com/testcat2', radius=123)
self._poi_service.count_nearby_amenity.assert_any_call(ANY, 'http://example.com/testamen', radius=123)
point = self._poi_service.count_nearby_category.call_args[0][0]
self.assertEqual((0.6, 54.5), (point.x, point.y))
point = self._poi_service.count_nearby_amenity.call_args[0][0]
self.assertEqual((0.6, 54.5), (point.x, point.y))
def test_result_lists_are_in_correct_form(self):
self._poi_service.count_nearby_category = Mock(side_effect=[3, 2])
self._poi_service.count_nearby_amenity = Mock(return_value=6)
response = json.loads(self._make_categories_request(12.3, 6.8).data)
self.assertEqual({
'self': 'http://mollyproject.org/apps/places/categories',
'location_filter': {
'within': 123,
'centre': {"type": "Point", "coordinates": [6.8, 12.3]}
},
'categories': [{
'self': 'http://mollyproject.org/apps/places/points-of-interest/by-category',
'href': 'http://localhost/nearby/12.3%2C6.8/category/test',
'category': 'http://example.com/testcat',
'count': 3
}, {
'self': 'http://mollyproject.org/apps/places/points-of-interest/by-category',
'href': 'http://localhost/nearby/12.3%2C6.8/category/test2',
'category': 'http://example.com/testcat2',
'count': 2
}],
'amenities': [{
'self': 'http://mollyproject.org/apps/places/points-of-interest/by-amenity',
'href': 'http://localhost/nearby/12.3%2C6.8/amenity/testamen',
'amenity': 'http://example.com/testamen',
'count': 6
}]
}, response)
def test_by_category_redirects_when_appropriate(self):
response = self._make_category_request(10.123456789, 15.987654321, 'test')
self.assertEqual(302, response.status_code)
self.assertEqual(
'http://localhost/nearby/10.12346,15.98765/category/test',
urllib2.unquote(dict(response.headers).get('Location'))
)
def test_by_amenity_redirects_when_appropriate(self):
response = self._make_amenity_request(10.123456789, 15.987654321, 'test')
self.assertEqual(302, response.status_code)
self.assertEqual(
'http://localhost/nearby/10.12346,15.98765/amenity/test',
urllib2.unquote(dict(response.headers).get('Location'))
)
def test_by_category_response_404s_if_invalid_slug_specified(self):
self.assertRaises(NotFound, self._make_category_request, 6.8, 12.4, 'invalid')
def test_by_category_returns_results_in_correct_form(self):
response = json.loads(self._make_category_request(15.4, 12.6, 'test').data)
self.assertEqual(
{
'self': 'http://mollyproject.org/apps/places/points-of-interest/by-category',
'location_filter': {
'within': 123,
'centre': {"type": "Point", "coordinates": [12.6, 15.4]}
},
'category': 'http://example.com/testcat',
'points_of_interest': [],
'count': 0,
'within': 123
},
response
)
def test_by_category_makes_correct_request_to_service(self):
self._make_category_request(15.4, 12.6, 'test')
self._poi_service.search_nearby_category.assert_any_call(ANY, 'http://example.com/testcat', radius=123)
point = self._poi_service.search_nearby_category.call_args[0][0]
self.assertEqual((12.6, 15.4), (point.x, point.y))
def test_by_category_includes_serialised_dict(self):
telephone_number = '+44123456789'
self._poi_service.search_nearby_category.return_value = [
PointOfInterest(slug='test', telephone_number=telephone_number)
]
response = json.loads(self._make_category_request(15.4, 12.6, 'test').data)
self.assertEquals(1, response['count'])
self.assertEquals(
'http://mollyproject.org/apps/places/point-of-interest', response['points_of_interest'][0]['self']
)
self.assertEquals('http://localhost/poi/test', response['points_of_interest'][0]['href'])
self.assertEquals(telephone_number, response['points_of_interest'][0]['poi']['telephone_number'])
def test_by_amenity_response_404s_if_invalid_slug_specified(self):
self.assertRaises(NotFound, self._make_amenity_request, 6.8, 12.4, 'invalid')
def test_by_amenity_returns_results_in_correct_form(self):
response = json.loads(self._make_amenity_request(15.4, 12.6, 'testamen').data)
self.assertEqual(
{
'self': 'http://mollyproject.org/apps/places/points-of-interest/by-amenity',
'location_filter': {
'within': 123,
'centre': {"type": "Point", "coordinates": [12.6, 15.4]}
},
'amenity': 'http://example.com/testamen',
'points_of_interest': [],
'count': 0,
'within': 123
},
response
)
def test_by_amenity_makes_correct_request_to_service(self):
self._make_amenity_request(15.4, 12.6, 'testamen')
self._poi_service.search_nearby_amenity.assert_any_call(ANY, 'http://example.com/testamen', radius=123)
point = self._poi_service.search_nearby_amenity.call_args[0][0]
self.assertEqual((12.6, 15.4), (point.x, point.y))
def test_by_amenity_includes_serialised_dict(self):
telephone_number = '+44123456789'
self._poi_service.search_nearby_amenity.return_value = [
PointOfInterest(slug='test', telephone_number=telephone_number)
]
response = json.loads(self._make_amenity_request(15.4, 12.6, 'testamen').data)
self.assertEquals(1, response['count'])
self.assertEquals(
'http://mollyproject.org/apps/places/point-of-interest', response['points_of_interest'][0]['self']
)
self.assertEquals('http://localhost/poi/test', response['points_of_interest'][0]['href'])
self.assertEquals(telephone_number, response['points_of_interest'][0]['poi']['telephone_number'])
class PointOfInterestSearchEndpointTest(unittest.TestCase):
def setUp(self):
self._poi_service = Mock()
self._poi_service.name_search = Mock(return_value=[])
self._app = Flask(__name__)
self._endpoint = PointOfInterestSearchEndpoint('test', self._poi_service)
self._app.add_url_rule('/search', 'test.search', self._endpoint.get)
self._app.add_url_rule('/<slug>', 'test.poi', self._endpoint.get)
def test_making_search_passes_it_to_service(self):
self._poi_service.search_name.return_value = []
search_terms = "test search"
self._make_search_request(search_terms)
self._poi_service.search_name.assert_called_once_with(search_terms)
def test_return_value_from_service_is_serialised_to_json(self):
self._poi_service.search_name.return_value = [PointOfInterest(slug='test:test')]
response = self._make_search_request("test")
self.assertEquals('test:test', response['results'][0]['poi']['slug'])
def _make_search_request(self, search_terms):
with self._app.test_request_context(
'/search?q={q}'.format(q=quote_plus(search_terms)), headers=[('Accept', 'application/json')]
):
response = self._endpoint.get()
return json.loads(response.data) | apache-2.0 | -4,325,968,840,956,645,400 | 44.304795 | 120 | 0.617176 | false |
gaapt/deepdive | src/test/resources/spouse/udf/ext_has_spouse.py | 1 | 1973 | #! /usr/bin/env python
import csv
import os
import sys
from collections import defaultdict
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
# Load the spouse dictionary for distant supervision
spouses = defaultdict(lambda: None)
with open (BASE_DIR + "/../data/spouses.csv") as csvfile:
reader = csv.reader(csvfile)
for line in reader:
spouses[line[0].strip().lower()] = line[1].strip().lower()
# Load relations of people that are not spouse
non_spouses = set()
lines = open(BASE_DIR + '/../data/non-spouses.tsv').readlines()
for line in lines:
name1, name2, relation = line.strip().split('\t')
non_spouses.add((name1, name2)) # Add a non-spouse relation pair
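# Expected stdin format, as parsed below: five tab-separated fields per row --
# sentence_id, person1 mention id, person1 text, person2 mention id, person2
# text. An illustrative (made-up) row:
#
#   1234 <TAB> 1234_10 <TAB> Barack Obama <TAB> 1234_15 <TAB> Michelle Obama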
# For each input tuple
for row in sys.stdin:
parts = row.strip().split('\t')
if len(parts) != 5:
print >>sys.stderr, 'Failed to parse row:', row
continue
sentence_id, p1_id, p1_text, p2_id, p2_text = parts
p1_text = p1_text.strip()
p2_text = p2_text.strip()
p1_text_lower = p1_text.lower()
p2_text_lower = p2_text.lower()
# See if the combination of people is in our supervision dictionary
# If so, set is_correct to true or false
is_true = '\N'
if spouses[p1_text_lower] == p2_text_lower:
is_true = '1'
if spouses[p2_text_lower] == p1_text_lower:
is_true = '1'
elif (p1_text == p2_text) or (p1_text in p2_text) or (p2_text in p1_text):
is_true = '0'
elif (p1_text_lower, p2_text_lower) in non_spouses:
is_true = '0'
elif (p2_text_lower, p1_text_lower) in non_spouses:
is_true = '0'
print '\t'.join([
p1_id, p2_id, sentence_id,
"%s-%s" %(p1_text, p2_text),
is_true,
"%s-%s" %(p1_id, p2_id),
'\N' # leave "id" blank for system!
])
# TABLE FORMAT: CREATE TABLE has_spouse(
# person1_id bigint,
# person2_id bigint,
# sentence_id bigint,
# description text,
# is_true boolean,
# relation_id bigint, -- unique identifier for has_spouse
# id bigint -- reserved for DeepDive
# );
| apache-2.0 | -6,167,275,737,590,914,000 | 28.014706 | 76 | 0.642169 | false |
google-research-datasets/natural-questions | eval_utils.py | 1 | 10966 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function for nq evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
from gzip import GzipFile
import json
import multiprocessing
from absl import flags
from absl import logging
flags.DEFINE_integer(
'long_non_null_threshold', 2,
'Require this many non-null long answer annotations '
'to count gold as containing a long answer.')
flags.DEFINE_integer(
'short_non_null_threshold', 2,
'Require this many non-null short answer annotations '
'to count gold as containing a short answer.')
FLAGS = flags.FLAGS
# A data structure for storing prediction and annotation.
# When a example has multiple annotations, multiple NQLabel will be used.
NQLabel = collections.namedtuple(
'NQLabel',
[
'example_id', # the unique id for each NQ example.
'long_answer_span', # A Span object for long answer.
'short_answer_span_list', # A list of Spans for short answer.
# Note that In NQ, the short answers
# do not need to be in a single span.
'yes_no_answer', # Indicate if the short answer is an yes/no answer
# The possible values are "yes", "no", "none".
# (case insensitive)
# If the field is "yes", short_answer_span_list
# should be empty or only contain null spans.
'long_score', # The prediction score for the long answer prediction.
'short_score' # The prediction score for the short answer prediction.
])
class Span(object):
"""A class for handling token and byte spans.
The logic is:
1) if both start_byte != -1 and end_byte != -1 then the span is defined
by byte offsets
2) else, if start_token != -1 and end_token != -1 then the span is define
by token offsets
3) else, this is a null span.
Null spans means that there is no (long or short) answers.
If your systems only care about token spans rather than byte spans, set all
byte spans to -1.
"""
def __init__(self, start_byte, end_byte, start_token_idx, end_token_idx):
if ((start_byte < 0 and end_byte >= 0) or
(start_byte >= 0 and end_byte < 0)):
raise ValueError('Inconsistent Null Spans (Byte).')
if ((start_token_idx < 0 and end_token_idx >= 0) or
(start_token_idx >= 0 and end_token_idx < 0)):
raise ValueError('Inconsistent Null Spans (Token).')
if start_byte >= 0 and end_byte >= 0 and start_byte >= end_byte:
raise ValueError('Invalid byte spans (start_byte >= end_byte).')
if ((start_token_idx >= 0 and end_token_idx >= 0) and
(start_token_idx >= end_token_idx)):
raise ValueError('Invalid token spans (start_token_idx >= end_token_idx)')
self.start_byte = start_byte
self.end_byte = end_byte
self.start_token_idx = start_token_idx
self.end_token_idx = end_token_idx
def is_null_span(self):
"""A span is a null span if the start and end are both -1."""
if (self.start_byte < 0 and self.end_byte < 0 and
self.start_token_idx < 0 and self.end_token_idx < 0):
return True
return False
def __str__(self):
byte_str = 'byte: [' + str(self.start_byte) + ',' + str(self.end_byte) + ')'
tok_str = ('tok: [' + str(self.start_token_idx) + ',' +
str(self.end_token_idx) + ')')
return byte_str + ' ' + tok_str
def __repr__(self):
return self.__str__()
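# Illustrative only (not used by the evaluation itself): spans can be given by
# byte offsets, by token offsets, or be null when there is no answer.
#
#   byte_span = Span(10, 25, -1, -1)     # byte-offset span
#   token_span = Span(-1, -1, 3, 7)      # token-offset span
#   null_span = Span(-1, -1, -1, -1)     # null span: no answer
#   assert null_span.is_null_span() and not token_span.is_null_span()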
def is_null_span_list(span_list):
"""Returns true iff all spans in span_list are null or span_list is empty."""
if not span_list or all([span.is_null_span() for span in span_list]):
return True
return False
def nonnull_span_equal(span_a, span_b):
"""Given two spans, return if they are equal.
Args:
span_a: a Span object.
span_b: a Span object. Only compare non-null spans. First, if the bytes are
not negative, compare byte offsets, Otherwise, compare token offsets.
Returns:
True or False
"""
assert isinstance(span_a, Span)
assert isinstance(span_b, Span)
assert not span_a.is_null_span()
assert not span_b.is_null_span()
# if byte offsets are not negative, compare byte offsets
if ((span_a.start_byte >= 0 and span_a.end_byte >= 0) and
(span_b.start_byte >= 0 and span_b.end_byte >= 0)):
if ((span_a.start_byte == span_b.start_byte) and
(span_a.end_byte == span_b.end_byte)):
return True
# if token offsets are not negative, compare token offsets
if ((span_a.start_token_idx >= 0 and span_a.end_token_idx >= 0) and
(span_b.start_token_idx >= 0 and span_b.end_token_idx >= 0)):
if ((span_a.start_token_idx == span_b.start_token_idx) and
(span_a.end_token_idx == span_b.end_token_idx)):
return True
return False
def span_set_equal(gold_span_list, pred_span_list):
"""Make the spans are completely equal besides null spans."""
gold_span_list = [span for span in gold_span_list if not span.is_null_span()]
pred_span_list = [span for span in pred_span_list if not span.is_null_span()]
for pspan in pred_span_list:
# not finding pspan equal to any spans in gold_span_list
if not any([nonnull_span_equal(pspan, gspan) for gspan in gold_span_list]):
return False
for gspan in gold_span_list:
# not finding gspan equal to any spans in pred_span_list
if not any([nonnull_span_equal(pspan, gspan) for pspan in pred_span_list]):
return False
return True
def gold_has_short_answer(gold_label_list):
"""Gets vote from multi-annotators for judging if there is a short answer."""
# We consider if there is a short answer if there is an short answer span or
# the yes/no answer is not none.
gold_has_answer = gold_label_list and sum([
((not is_null_span_list(label.short_answer_span_list)) or
(label.yes_no_answer != 'none')) for label in gold_label_list
]) >= FLAGS.short_non_null_threshold
return gold_has_answer
def gold_has_long_answer(gold_label_list):
"""Gets vote from multi-annotators for judging if there is a long answer."""
gold_has_answer = gold_label_list and (sum([
not label.long_answer_span.is_null_span() # long answer not null
for label in gold_label_list # for each annotator
]) >= FLAGS.long_non_null_threshold)
return gold_has_answer
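# Practical reading of the two checks above, under the default flags: an
# example is treated as having a gold long answer only when at least
# --long_non_null_threshold (2) annotators chose a non-null long answer span,
# and as having a gold short answer only when at least
# --short_non_null_threshold (2) annotators gave either a non-null short span
# list or a yes/no answer.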
def read_prediction_json(predictions_path):
"""Read the prediction json with scores.
Args:
predictions_path: the path for the prediction json.
Returns:
    A dictionary with key = example_id, value = NQLabel.
"""
logging.info('Reading predictions from file: %s', format(predictions_path))
with open(predictions_path, 'r') as f:
predictions = json.loads(f.read())
nq_pred_dict = {}
for single_prediction in predictions['predictions']:
if 'long_answer' in single_prediction:
long_span = Span(single_prediction['long_answer']['start_byte'],
single_prediction['long_answer']['end_byte'],
single_prediction['long_answer']['start_token'],
single_prediction['long_answer']['end_token'])
else:
long_span = Span(-1, -1, -1, -1) # Span is null if not presented.
short_span_list = []
if 'short_answers' in single_prediction:
for short_item in single_prediction['short_answers']:
short_span_list.append(
Span(short_item['start_byte'], short_item['end_byte'],
short_item['start_token'], short_item['end_token']))
yes_no_answer = 'none'
if 'yes_no_answer' in single_prediction:
yes_no_answer = single_prediction['yes_no_answer'].lower()
if yes_no_answer not in ['yes', 'no', 'none']:
raise ValueError('Invalid yes_no_answer value in prediction')
if yes_no_answer != 'none' and not is_null_span_list(short_span_list):
raise ValueError('yes/no prediction and short answers cannot coexist.')
pred_item = NQLabel(
example_id=single_prediction['example_id'],
long_answer_span=long_span,
short_answer_span_list=short_span_list,
yes_no_answer=yes_no_answer,
long_score=single_prediction['long_answer_score'],
short_score=single_prediction['short_answers_score'])
nq_pred_dict[single_prediction['example_id']] = pred_item
return nq_pred_dict
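# Shape of the predictions file consumed above (reconstructed from the parsing
# logic; field values are illustrative only):
#
#   {"predictions": [
#       {"example_id": 1,
#        "long_answer": {"start_byte": 0, "end_byte": 50,
#                        "start_token": 0, "end_token": 10},
#        "short_answers": [{"start_byte": 3, "end_byte": 9,
#                           "start_token": 1, "end_token": 2}],
#        "yes_no_answer": "none",
#        "long_answer_score": 0.9,
#        "short_answers_score": 0.8}
#   ]}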
def read_annotation_from_one_split(gzipped_input_file):
"""Read annotation from one split of file."""
if isinstance(gzipped_input_file, str):
gzipped_input_file = open(gzipped_input_file, 'rb')
logging.info('parsing %s ..... ', gzipped_input_file.name)
annotation_dict = {}
with GzipFile(fileobj=gzipped_input_file) as input_file:
for line in input_file:
json_example = json.loads(line)
example_id = json_example['example_id']
# There are multiple annotations for one nq example.
annotation_list = []
for annotation in json_example['annotations']:
long_span_rec = annotation['long_answer']
long_span = Span(long_span_rec['start_byte'], long_span_rec['end_byte'],
long_span_rec['start_token'],
long_span_rec['end_token'])
short_span_list = []
for short_span_rec in annotation['short_answers']:
short_span = Span(short_span_rec['start_byte'],
short_span_rec['end_byte'],
short_span_rec['start_token'],
short_span_rec['end_token'])
short_span_list.append(short_span)
gold_label = NQLabel(
example_id=example_id,
long_answer_span=long_span,
short_answer_span_list=short_span_list,
long_score=0,
short_score=0,
yes_no_answer=annotation['yes_no_answer'].lower())
annotation_list.append(gold_label)
annotation_dict[example_id] = annotation_list
return annotation_dict
def read_annotation(path_name, n_threads=10):
"""Read annotations with real multiple processes."""
input_paths = glob.glob(path_name)
pool = multiprocessing.Pool(n_threads)
try:
dict_list = pool.map(read_annotation_from_one_split, input_paths)
finally:
pool.close()
pool.join()
final_dict = {}
for single_dict in dict_list:
final_dict.update(single_dict)
return final_dict
| apache-2.0 | 4,003,235,773,756,111,400 | 34.035144 | 80 | 0.648915 | false |
Azure/azure-sdk-for-python | setup.py | 1 | 2261 | #!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
from __future__ import print_function
import os.path
import glob
import copy
import sys
import runpy
root_folder = os.path.abspath(os.path.dirname(__file__))
# pull in any packages that exist in the root directory
packages = {('.', os.path.dirname(p)) for p in glob.glob('azure*/setup.py')}
# Handle the SDK folder as well
packages.update({tuple(os.path.dirname(f).rsplit(os.sep, 1)) for f in glob.glob('sdk/*/azure*/setup.py')})
# [(base_folder, package_name), ...] to {package_name: base_folder, ...}
packages = {package_name: base_folder for (base_folder, package_name) in packages}
# Extract nspkg and sort nspkg by number of "-"
nspkg_packages = [p for p in packages.keys() if "nspkg" in p]
nspkg_packages.sort(key = lambda x: len([c for c in x if c == '-']))
# Meta-packages to ignore
meta_package = ['azure-keyvault', 'azure-mgmt', 'azure', 'azure-storage']
# content packages are packages that are not meta nor nspkg
content_package = sorted([p for p in packages.keys() if p not in meta_package+nspkg_packages])
# Move azure-common at the beginning, it's important this goes first
content_package.remove("azure-common")
content_package.insert(0, "azure-common")
# Package final:
if "install" in sys.argv:
packages_for_installation = content_package
else:
packages_for_installation = nspkg_packages + content_package
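# Net effect of the ordering above (illustrative): outside of a plain
# "install" run, the namespace shim packages are handled first, sorted so that
# e.g. azure-nspkg comes before azure-mgmt-nspkg, then azure-common, then the
# remaining content packages; a plain "install" skips the nspkg shims
# entirely.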
for pkg_name in packages_for_installation:
pkg_setup_folder = os.path.join(root_folder, packages[pkg_name], pkg_name)
pkg_setup_path = os.path.join(pkg_setup_folder, 'setup.py')
try:
saved_dir = os.getcwd()
saved_syspath = sys.path
os.chdir(pkg_setup_folder)
sys.path = [pkg_setup_folder] + copy.copy(saved_syspath)
print("Start ", pkg_setup_path)
result = runpy.run_path(pkg_setup_path)
except Exception as e:
print(e, file=sys.stderr)
finally:
os.chdir(saved_dir)
sys.path = saved_syspath
| mit | -1,612,231,462,446,572,300 | 35.435484 | 106 | 0.648074 | false |
defeo/cypari2 | autogen/doc.py | 1 | 10642 | # -*- coding: utf-8 -*-
"""
Handle PARI documentation
"""
from __future__ import unicode_literals
import re
import subprocess
leading_ws = re.compile("^( +)", re.MULTILINE)
trailing_ws = re.compile("( +)$", re.MULTILINE)
double_space = re.compile(" +")
end_space = re.compile(r"(@\[end[a-z]*\])([A-Za-z])")
end_paren = re.compile(r"(@\[end[a-z]*\])([(])")
begin_verb = re.compile(r"@1")
end_verb = re.compile(r"@[23] *@\[endcode\]")
verb_loop = re.compile("^( .*)@\[[a-z]*\]", re.MULTILINE)
dollars = re.compile(r"@\[dollar\]\s*(.*?)\s*@\[dollar\]", re.DOTALL)
doubledollars = re.compile(r"@\[doubledollar\]\s*(.*?)\s*@\[doubledollar\] *", re.DOTALL)
math_loop = re.compile(r"(@\[start[A-Z]*MATH\][^@]*)@\[[a-z]*\]")
math_backslash = re.compile(r"(@\[start[A-Z]*MATH\][^@]*)=BACKSLASH=")
prototype = re.compile("^[^\n]*\n\n")
library_syntax = re.compile("The library syntax is.*", re.DOTALL)
newlines = re.compile("\n\n\n\n*")
bullet_loop = re.compile("(@BULLET( [^\n]*\n)*)([^ \n])")
indent_math = re.compile("(@\\[startDISPLAYMATH\\].*\n(.+\n)*)(\\S)")
escape_backslash = re.compile(r"^(\S.*)[\\]", re.MULTILINE)
escape_mid = re.compile(r"^(\S.*)[|]", re.MULTILINE)
escape_percent = re.compile(r"^(\S.*)[%]", re.MULTILINE)
escape_hash = re.compile(r"^(\S.*)[#]", re.MULTILINE)
label_define = re.compile(r"@\[label [a-zA-Z0-9:]*\]")
label_ref = re.compile(r"(Section *)?@\[startref\](se:)?([^@]*)@\[endref\]")
def sub_loop(regex, repl, text):
"""
In ``text``, substitute ``regex`` by ``repl`` recursively. As long
as substitution is possible, ``regex`` is substituted.
INPUT:
- ``regex`` -- a compiled regular expression
- ``repl`` -- replacement text
- ``text`` -- input text
OUTPUT: substituted text
EXAMPLES:
Ensure there a space between any 2 letters ``x``::
>>> from autogen.doc import sub_loop
>>> import re
>>> print(sub_loop(re.compile("xx"), "x x", "xxx_xx"))
x x x_x x
"""
while True:
text, n = regex.subn(repl, text)
if not n:
return text
def raw_to_rest(doc):
r"""
Convert raw PARI documentation (with ``@``-codes) to reST syntax.
INPUT:
- ``doc`` -- the raw PARI documentation
OUTPUT: a unicode string
EXAMPLES::
>>> from autogen.doc import raw_to_rest
>>> print(raw_to_rest(b"@[startbold]hello world@[endbold]"))
:strong:`hello world`
TESTS::
>>> raw_to_rest(b"@[invalid]")
Traceback (most recent call last):
...
SyntaxError: @ found: @[invalid]
>>> s = b'@3@[startbold]*@[endbold] snip @[dollar]0@[dollar]\ndividing @[dollar]#E@[dollar].'
>>> print(raw_to_rest(s))
- snip :math:`0`
dividing :math:`\#E`.
"""
doc = doc.decode("utf-8")
# Work around a specific problem with doc of "component"
doc = doc.replace("[@[dollar]@[dollar]]", "[]")
# Work around a specific problem with doc of "algdivl"
doc = doc.replace(r"\y@", r"\backslash y@")
# Special characters
doc = doc.replace("@[lt]", "<")
doc = doc.replace("@[gt]", ">")
doc = doc.replace("@[pm]", "±")
doc = doc.replace("@[nbrk]", "\xa0")
doc = doc.replace("@[agrave]", "à")
doc = doc.replace("@[aacute]", "á")
doc = doc.replace("@[eacute]", "é")
doc = doc.replace("@[ouml]", "ö")
doc = doc.replace("@[uuml]", "ü")
doc = doc.replace("\\'{a}", "á")
# Remove leading and trailing whitespace from every line
doc = leading_ws.sub("", doc)
doc = trailing_ws.sub("", doc)
# Remove multiple spaces
doc = double_space.sub(" ", doc)
# Sphinx dislikes inline markup immediately followed by a letter:
# insert a non-breaking space
doc = end_space.sub("\\1\xa0\\2", doc)
# Similarly, for inline markup immediately followed by an open
# parenthesis, insert a space
doc = end_paren.sub("\\1 \\2", doc)
# Fix labels and references
doc = label_define.sub("", doc)
doc = label_ref.sub("``\\3`` (in the PARI manual)", doc)
# Bullet items
doc = doc.replace("@3@[startbold]*@[endbold] ", "@BULLET ")
doc = sub_loop(bullet_loop, "\\1 \\3", doc)
doc = doc.replace("@BULLET ", "- ")
# Add =VOID= in front of all leading whitespace (which was
# intentionally added) to avoid confusion with verbatim blocks.
doc = leading_ws.sub(r"=VOID=\1", doc)
# Verbatim blocks
doc = begin_verb.sub("::\n\n@0", doc)
doc = end_verb.sub("", doc)
doc = doc.replace("@0", " ")
doc = doc.replace("@3", "")
# Remove all further markup from within verbatim blocks
doc = sub_loop(verb_loop, "\\1", doc)
# Pair dollars -> beginmath/endmath
doc = doc.replace("@[dollar]@[dollar]", "@[doubledollar]")
doc = dollars.sub(r"@[startMATH]\1@[endMATH]", doc)
doc = doubledollars.sub(r"@[startDISPLAYMATH]\1@[endDISPLAYMATH]", doc)
# Replace special characters (except in verbatim blocks)
# \ -> =BACKSLASH=
# | -> =MID=
# % -> =PERCENT=
# # -> =HASH=
doc = sub_loop(escape_backslash, "\\1=BACKSLASH=", doc)
doc = sub_loop(escape_mid, "\\1=MID=", doc)
doc = sub_loop(escape_percent, "\\1=PERCENT=", doc)
doc = sub_loop(escape_hash, "\\1=HASH=", doc)
# Math markup
doc = doc.replace("@[obr]", "{")
doc = doc.replace("@[cbr]", "}")
doc = doc.replace("@[startword]", "\\")
doc = doc.replace("@[endword]", "")
# (special rules for Hom and Frob, see trac ticket 21005)
doc = doc.replace("@[startlword]Hom@[endlword]", "\\text{Hom}")
doc = doc.replace("@[startlword]Frob@[endlword]", "\\text{Frob}")
doc = doc.replace("@[startlword]", "\\")
doc = doc.replace("@[endlword]", "")
doc = doc.replace("@[startbi]", "\\mathbb{")
doc = doc.replace("@[endbi]", "}")
# PARI TeX macros
doc = doc.replace(r"\Cl", r"\mathrm{Cl}")
doc = doc.replace(r"\Id", r"\mathrm{Id}")
doc = doc.replace(r"\Norm", r"\mathrm{Norm}")
doc = doc.replace(r"\disc", r"\mathrm{disc}")
doc = doc.replace(r"\gcd", r"\mathrm{gcd}")
doc = doc.replace(r"\lcm", r"\mathrm{lcm}")
# Remove extra markup inside math blocks
doc = sub_loop(math_loop, "\\1", doc)
# Replace special characters by escape sequences
# Note that =BACKSLASH= becomes an unescaped backslash in math mode
# but an escaped backslash otherwise.
doc = sub_loop(math_backslash, r"\1\\", doc)
doc = doc.replace("=BACKSLASH=", r"\\")
doc = doc.replace("=MID=", r"\|")
doc = doc.replace("=PERCENT=", r"\%")
doc = doc.replace("=HASH=", r"\#")
doc = doc.replace("=VOID=", "")
# Handle DISPLAYMATH
doc = doc.replace("@[endDISPLAYMATH]", "\n\n")
doc = sub_loop(indent_math, "\\1 \\3", doc)
doc = doc.replace("@[startDISPLAYMATH]", "\n\n.. MATH::\n\n ")
# Inline markup. We do use the more verbose :foo:`text` style since
# those nest more easily.
doc = doc.replace("@[startMATH]", ":math:`")
doc = doc.replace("@[endMATH]", "`")
doc = doc.replace("@[startpodcode]", "``")
doc = doc.replace("@[endpodcode]", "``")
doc = doc.replace("@[startcode]", ":literal:`")
doc = doc.replace("@[endcode]", "`")
doc = doc.replace("@[startit]", ":emphasis:`")
doc = doc.replace("@[endit]", "`")
doc = doc.replace("@[startbold]", ":strong:`")
doc = doc.replace("@[endbold]", "`")
# Remove prototype
doc = prototype.sub("", doc)
# Remove everything starting with "The library syntax is"
# (this is not relevant for Python)
doc = library_syntax.sub("", doc)
# Allow at most 2 consecutive newlines
doc = newlines.sub("\n\n", doc)
# Strip result
doc = doc.strip()
# Ensure no more @ remains
try:
i = doc.index("@")
except ValueError:
return doc
ilow = max(0, i-30)
ihigh = min(len(doc), i+30)
raise SyntaxError("@ found: " + doc[ilow:ihigh])
def get_raw_doc(function):
r"""
Get the raw documentation of PARI function ``function``.
INPUT:
- ``function`` -- name of a PARI function
EXAMPLES::
>>> from autogen.doc import get_raw_doc
>>> print(get_raw_doc("cos").decode())
@[startbold]cos@[dollar](x)@[dollar]:@[endbold]
<BLANKLINE>
@[label se:cos]
Cosine of @[dollar]x@[dollar].
...
>>> get_raw_doc("abcde")
Traceback (most recent call last):
...
RuntimeError: no help found for 'abcde'
"""
doc = subprocess.check_output(["gphelp", "-raw", function])
if doc.endswith(b"""' not found !\n"""):
raise RuntimeError("no help found for '{}'".format(function))
return doc
def get_rest_doc(function):
r"""
Get the documentation of the PARI function ``function`` in reST
syntax.
INPUT:
- ``function`` -- name of a PARI function
EXAMPLES::
>>> from autogen.doc import get_rest_doc
>>> print(get_rest_doc("teichmuller"))
Teichmüller character of the :math:`p`-adic number :math:`x`, i.e. the unique
:math:`(p-1)`-th root of unity congruent to :math:`x / p^{v_p(x)}` modulo :math:`p`...
::
>>> print(get_rest_doc("weber"))
One of Weber's three :math:`f` functions.
If :math:`flag = 0`, returns
<BLANKLINE>
.. MATH::
<BLANKLINE>
f(x) = \exp (-i\pi/24).\eta ((x+1)/2)/\eta (x) {such that}
j = (f^{24}-16)^3/f^{24},
<BLANKLINE>
where :math:`j` is the elliptic :math:`j`-invariant (see the function :literal:`ellj`).
If :math:`flag = 1`, returns
<BLANKLINE>
.. MATH::
<BLANKLINE>
f_1(x) = \eta (x/2)/\eta (x) {such that}
j = (f_1^{24}+16)^3/f_1^{24}.
<BLANKLINE>
Finally, if :math:`flag = 2`, returns
<BLANKLINE>
.. MATH::
<BLANKLINE>
f_2(x) = \sqrt{2}\eta (2x)/\eta (x) {such that}
j = (f_2^{24}+16)^3/f_2^{24}.
<BLANKLINE>
Note the identities :math:`f^8 = f_1^8+f_2^8` and :math:`ff_1f_2 = \sqrt2`.
::
>>> doc = get_rest_doc("ellap") # doc depends on PARI version
::
>>> print(get_rest_doc("bitor"))
bitwise (inclusive)
:literal:`or` of two integers :math:`x` and :math:`y`, that is the integer
<BLANKLINE>
.. MATH::
<BLANKLINE>
\sum
(x_i or y_i) 2^i
<BLANKLINE>
See ``bitand`` (in the PARI manual) for the behavior for negative arguments.
"""
raw = get_raw_doc(function)
return raw_to_rest(raw)
| gpl-2.0 | -2,136,652,580,817,424,100 | 30.178886 | 101 | 0.554552 | false |
jalanb/dotsite | pysyte/cli/streams.py | 1 | 1979 | """Module to handle streams of text from cli arguments"""
import os
import sys
import contextlib
from itertools import chain
from six import StringIO
from pysyte import iteration
from pysyte.cli import arguments
from pysyte.oss.platforms import get_clipboard_data
def parse_args():
"""Parse out command line arguments"""
parser = arguments.parser(__doc__)
parser.args('streams', help='streams to use')
parser.opt('-p', '--paste', 'paste text from clipboard')
parser.opt('-i', '--stdin', 'wait for text from stdin')
return parser.parse_args()
def args(parsed_args, name=None, files_only=False):
"""Interpret parsed args to streams"""
strings = parsed_args.get_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
elif files_only:
return []
else:
streams = []
if '-' in files or not files or getattr(parsed_args, 'stdin', False):
streams.append(sys.stdin)
if getattr(parsed_args, 'paste', not files):
streams.append(clipboard_stream())
return streams
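# A minimal usage sketch (hypothetical caller code; handle() is a placeholder
# for whatever the caller does with each stream):
#
#   parsed = parse_args()
#   for stream in args(parsed, 'streams'):
#       handle(stream.read())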
def files(parsed_args, name=None):
return args(parsed_args, name, True)
def all():
    """Yield an open stream for every file argument, then stdin when needed"""
    yielded = False
    for path in _arg_files():
        yield open(path)
        yielded = True
    if not yielded or '-' in sys.argv:
        yield sys.stdin
def some():
if sys.argv[1:]:
assert _arg_files()
        return _any()
def clipboard_stream(name=None):
stream = StringIO(get_clipboard_data())
stream.name = name or '<clipboard>'
return stream
def _arg_files():
return [a for a in sys.argv[1:] if os.path.isfile(a)]
def _arg_streams():
"""yield streams to all arg.isfile()"""
for path in _arg_files():
yield open(path)
def _any():
try:
stream = iteration.first(_arg_streams())
if stream:
return _arg_streams()
except ValueError:
return iter([clipboard_stream(), sys.stdin])
| mit | 2,554,933,082,323,681,300 | 23.7375 | 73 | 0.632643 | false |
IRDeNial/gBot | main.py | 1 | 8349 | #!/usr/bin/env python
import string
import ctypes
import os
from os import path
from threading import Thread
from time import sleep
import re
# For headless browsing. Life made easy.
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
# Tkinter main
from Tkinter import Tk, Frame
# Tkinter GUI widgets
from Tkinter import Button, Text, Entry, Label, Toplevel, Spinbox
# Tkinter alignment options
from Tkinter import LEFT, RIGHT, BOTH, CENTER
# Tkinter directions W=West, E=East, etc...
from Tkinter import N, NE, E, SE, S, SW, W, NW
# Tkinter state options
from Tkinter import DISABLED, NORMAL
# Tkinter position options
from Tkinter import END
# Style? Idk, but this is cool
from ttk import Style
class Application(Frame):
threadRunning = False
def __init__(self, parent):
Frame.__init__(self, parent)
self.parent = parent
self.initUI()
def threadded(self,*config):
self.threadRunning = True
debugger = config[5]
self.addDebug(debugger,"Posting thread started")
if config[0] == "":
self.addDebug(debugger," - No username")
self.addDebug(debugger,"Posting thread finished")
self.threadRunning = False
return
if config[1] == "":
self.addDebug(debugger," - No password")
self.addDebug(debugger,"Posting thread finished")
self.threadRunning = False
return
f = open(config[3], "a")
self.addDebug(debugger," - Configuration:")
self.addDebug(debugger," Output Path: %s" % (config[3]))
self.addDebug(debugger," # of Posts: %d" % (int(config[4])))
driver = webdriver.PhantomJS('phantomjs')
self.addDebug(debugger," - Driver successfully loaded")
driver.get('https://accounts.google.com/ServiceLogin?service=oz&continue=https://plus.google.com/')
self.addDebug(debugger," - Successfully navigated to G+ login page")
driver.find_element_by_id("Email").send_keys(config[0])
driver.find_element_by_id("Passwd").send_keys(config[1])
driver.find_element_by_name("signIn").click()
self.addDebug(debugger," - Attempting to log in")
if driver.current_url != "https://plus.google.com/":
self.addDebug(debugger," - Incorrect username/password")
else:
self.addDebug(debugger," - Successfully logged in")
profileLink = ""
self.addDebug(debugger," - Searching for profile link")
for element in driver.find_elements_by_tag_name("a"):
if element.get_attribute("aria-label") == "Profile":
m = re.search('/plus.google.com/(.+)', element.get_attribute("href"))
if m:
profileLink = m.group(1)
self.addDebug(debugger," - %s" % (profileLink))
break
for x in range(0,int(config[4])):
tempHolder = driver.find_elements_by_tag_name("body")
self.addDebug(debugger," - Searching for text input")
for element in driver.find_elements_by_tag_name("div"):
if element.get_attribute("guidedhelpid") == "sharebox_textarea":
element.click()
break
sleep(5)
for element in driver.find_elements_by_tag_name("div"):
if element.get_attribute("guidedhelpid") == "sharebox_editor":
tempHolder = element
break
for element in tempHolder.find_elements_by_tag_name("div"):
if element.get_attribute("role") == "textbox":
self.addDebug(debugger, " - Found it!")
self.addDebug(debugger," - Inputting Text")
element.send_keys(config[2])
break
self.addDebug(debugger," - Searching for submit button")
for element in driver.find_elements_by_tag_name("div"):
if element.get_attribute("guidedhelpid") == "shareboxcontrols":
tempHolder = element
break
for element in tempHolder.find_elements_by_tag_name("div"):
if element.get_attribute("guidedhelpid") == "sharebutton":
self.addDebug(debugger, " - Found it!")
element.click()
break
sleep(5)
driver.get("https://plus.google.com/" + profileLink + "/posts")
self.addDebug(debugger," - Searching for post")
for element in driver.find_elements_by_tag_name("a"):
try:
if (profileLink + "/posts/") in element.get_attribute("href"):
f.write(element.get_attribute("href") + "\r\n")
self.addDebug(debugger," - Found URL, saving")
break
except:
continue
self.addDebug(debugger," - Waiting 1 second before another post")
sleep(1)
self.addDebug(debugger,"Posting thread finished")
self.threadRunning = False
f.close()
def addDebug(self,parent,text):
parent.insert('end',"%s\n" % (text))
parent.see(END)
def doPosting(self,me,debugLogger,username,password,message,output,num):
if self.threadRunning != True:
settings = [
username,
password,
message,
output,
num,
debugLogger
]
thread = Thread(target=self.threadded, args=(settings))
thread.start()
else:
self.addDebug(debugLogger,"Attempted to start another posting thread. Bad.")
def initUI(self):
# Main Window
self.parent.title("gBot")
self.style = Style()
self.style.theme_use("default")
self.pack(fill=BOTH, expand=1)
# Debug Window
Toplevel1 = Toplevel(self)
Toplevel1.title("gBot Debug Console")
self.pack(fill=BOTH, expand=1)
TL_T1 = Text(Toplevel1, width=50)
TL_T1.pack()
Toplevel1.state("withdrawn")
Toplevel1.protocol('WM_DELETE_WINDOW', lambda:Toplevel1.state("withdrawn"))
Toplevel1.attributes("-topmost", True)
# Username Input
L1 = Label(self, text="G+ User Name")
L1.grid(row=0, column=0, sticky=E, ipady=1)
E1 = Entry(self, width=30)
E1.grid(row=0, column=1, ipady=1, sticky=E)
# Password Input
L2 = Label(self, text="G+ Password")
L2.grid(row=1, column=0, sticky=E, ipady=1)
E2 = Entry(self, width=30)
E2.grid(row=1, column=1, ipady=1, sticky=E)
# Output Path Input
L3 = Label(self, text="Output Path")
L3.grid(row=2, column=0, sticky=E, pady=1)
E3 = Entry(self, width=30)
E3.grid(row=2, column=1, ipady=1, sticky=E)
E3.insert(0, "%s\links.txt" % (os.getcwd()))
# Num Posts
L4 = Label(self, text="# Of Posts")
L4.grid(row=3, column=0, sticky=E, pady=1)
S1 = Spinbox(self, from_=1, to=9999999, width=28)
S1.grid(row=3, column=1, ipady=1, sticky=E)
# Post Input
T1 = Text(self, width=30)
T1.grid(row=5, columnspan=2, sticky=W+E, pady=1)
# Start button
B1 = Button(self, text="Start Posting", command=lambda:self.doPosting(B1,TL_T1,E1.get(),E2.get(),T1.get(1.0,END),E3.get(),S1.get()))
B1.grid(row=6,columnspan=2, sticky=W+E)
# Debug button
B2 = Button(self, text="Debug log", command=lambda:Toplevel1.state("normal"))
B2.grid(row=7, columnspan=2, sticky=W+E)
self.addDebug(TL_T1,"Started successfully")
def main():
root = Tk()
root.resizable(0,0)
root.wm_iconbitmap("bytecon.ico")
app = Application(root)
root.mainloop()
def messageBox(title, text):
ctypes.windll.user32.MessageBoxA(0, text, title, 1)
if __name__ == '__main__':
main() | bsd-3-clause | -8,223,949,900,782,445,000 | 34.531915 | 140 | 0.554557 | false |
aio-libs/aiomysql | aiomysql/sa/connection.py | 1 | 14895 | # ported from:
# https://github.com/aio-libs/aiopg/blob/master/aiopg/sa/connection.py
import weakref
from sqlalchemy.sql import ClauseElement
from sqlalchemy.sql.dml import UpdateBase
from sqlalchemy.sql.ddl import DDLElement
from . import exc
from .result import create_result_proxy
from .transaction import (RootTransaction, Transaction,
NestedTransaction, TwoPhaseTransaction)
from ..utils import _TransactionContextManager, _SAConnectionContextManager
def noop(k):
return k
class SAConnection:
def __init__(self, connection, engine, compiled_cache=None):
self._connection = connection
self._transaction = None
self._savepoint_seq = 0
self._weak_results = weakref.WeakSet()
self._engine = engine
self._dialect = engine.dialect
self._compiled_cache = compiled_cache
def execute(self, query, *multiparams, **params):
"""Executes a SQL query with optional parameters.
query - a SQL query string or any sqlalchemy expression.
*multiparams/**params - represent bound parameter values to be
used in the execution. Typically, the format is a dictionary
passed to *multiparams:
await conn.execute(
table.insert(),
{"id":1, "value":"v1"},
)
...or individual key/values interpreted by **params::
await conn.execute(
table.insert(), id=1, value="v1"
)
In the case that a plain SQL string is passed, a tuple or
individual values in *multiparams may be passed::
await conn.execute(
"INSERT INTO table (id, value) VALUES (%d, %s)",
(1, "v1")
)
await conn.execute(
"INSERT INTO table (id, value) VALUES (%s, %s)",
1, "v1"
)
Returns ResultProxy instance with results of SQL query
execution.
"""
coro = self._execute(query, *multiparams, **params)
return _SAConnectionContextManager(coro)
def _base_params(self, query, dp, compiled, is_update):
"""
handle params
"""
if dp and isinstance(dp, (list, tuple)):
if is_update:
dp = {c.key: pval for c, pval in zip(query.table.c, dp)}
else:
raise exc.ArgumentError(
"Don't mix sqlalchemy SELECT "
"clause with positional "
"parameters"
)
compiled_params = compiled.construct_params(dp)
processors = compiled._bind_processors
params = [{
key: processors.get(key, noop)(compiled_params[key])
for key in compiled_params
}]
post_processed_params = self._dialect.execute_sequence_format(params)
return post_processed_params[0]
async def _executemany(self, query, dps, cursor):
"""
executemany
"""
result_map = None
if isinstance(query, str):
await cursor.executemany(query, dps)
elif isinstance(query, DDLElement):
raise exc.ArgumentError(
"Don't mix sqlalchemy DDL clause "
"and execution with parameters"
)
elif isinstance(query, ClauseElement):
compiled = query.compile(dialect=self._dialect)
params = []
is_update = isinstance(query, UpdateBase)
for dp in dps:
params.append(
self._base_params(
query,
dp,
compiled,
is_update,
)
)
await cursor.executemany(str(compiled), params)
result_map = compiled._result_columns
else:
raise exc.ArgumentError(
"sql statement should be str or "
"SQLAlchemy data "
"selection/modification clause"
)
ret = await create_result_proxy(
self,
cursor,
self._dialect,
result_map
)
self._weak_results.add(ret)
return ret
async def _execute(self, query, *multiparams, **params):
cursor = await self._connection.cursor()
dp = _distill_params(multiparams, params)
if len(dp) > 1:
return await self._executemany(query, dp, cursor)
elif dp:
dp = dp[0]
result_map = None
if isinstance(query, str):
await cursor.execute(query, dp or None)
elif isinstance(query, ClauseElement):
if self._compiled_cache is not None:
key = query
compiled = self._compiled_cache.get(key)
if not compiled:
compiled = query.compile(dialect=self._dialect)
if dp and dp.keys() == compiled.params.keys() \
or not (dp or compiled.params):
# we only want queries with bound params in cache
self._compiled_cache[key] = compiled
else:
compiled = query.compile(dialect=self._dialect)
if not isinstance(query, DDLElement):
post_processed_params = self._base_params(
query,
dp,
compiled,
isinstance(query, UpdateBase)
)
result_map = compiled._result_columns
else:
if dp:
raise exc.ArgumentError("Don't mix sqlalchemy DDL clause "
"and execution with parameters")
post_processed_params = compiled.construct_params()
result_map = None
await cursor.execute(str(compiled), post_processed_params)
else:
raise exc.ArgumentError("sql statement should be str or "
"SQLAlchemy data "
"selection/modification clause")
ret = await create_result_proxy(
self, cursor, self._dialect, result_map
)
self._weak_results.add(ret)
return ret
async def scalar(self, query, *multiparams, **params):
"""Executes a SQL query and returns a scalar value."""
res = await self.execute(query, *multiparams, **params)
return (await res.scalar())
@property
def closed(self):
"""The readonly property that returns True if connections is closed."""
return self._connection is None or self._connection.closed
@property
def connection(self):
return self._connection
def begin(self):
"""Begin a transaction and return a transaction handle.
The returned object is an instance of Transaction. This
object represents the "scope" of the transaction, which
completes when either the .rollback or .commit method is
called.
Nested calls to .begin on the same SAConnection instance will
return new Transaction objects that represent an emulated
transaction within the scope of the enclosing transaction,
that is::
trans = await conn.begin() # outermost transaction
trans2 = await conn.begin() # "nested"
await trans2.commit() # does nothing
await trans.commit() # actually commits
Calls to .commit only have an effect when invoked via the
outermost Transaction object, though the .rollback method of
any of the Transaction objects will roll back the transaction.
See also:
.begin_nested - use a SAVEPOINT
.begin_twophase - use a two phase/XA transaction
"""
coro = self._begin()
return _TransactionContextManager(coro)
async def _begin(self):
if self._transaction is None:
self._transaction = RootTransaction(self)
await self._begin_impl()
return self._transaction
else:
return Transaction(self, self._transaction)
async def _begin_impl(self):
cur = await self._connection.cursor()
try:
await cur.execute('BEGIN')
finally:
await cur.close()
async def _commit_impl(self):
cur = await self._connection.cursor()
try:
await cur.execute('COMMIT')
finally:
await cur.close()
self._transaction = None
async def _rollback_impl(self):
cur = await self._connection.cursor()
try:
await cur.execute('ROLLBACK')
finally:
await cur.close()
self._transaction = None
async def begin_nested(self):
"""Begin a nested transaction and return a transaction handle.
The returned object is an instance of :class:`.NestedTransaction`.
Nested transactions require SAVEPOINT support in the
underlying database. Any transaction in the hierarchy may
.commit() and .rollback(), however the outermost transaction
still controls the overall .commit() or .rollback() of the
transaction of a whole.
"""
if self._transaction is None:
self._transaction = RootTransaction(self)
await self._begin_impl()
else:
self._transaction = NestedTransaction(self, self._transaction)
self._transaction._savepoint = await self._savepoint_impl()
return self._transaction
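    # Illustrative sketch (not from the original file): nested transactions in
    # use, assuming `conn` is an SAConnection and `tbl` is a SQLAlchemy Table:
    #
    #     trans = await conn.begin()
    #     savepoint = await conn.begin_nested()  # issues a SAVEPOINT
    #     await conn.execute(tbl.insert(), id=1)
    #     await savepoint.rollback()             # rolls back to the savepoint only
    #     await trans.commit()                   # commits the outer transaction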
async def _savepoint_impl(self, name=None):
self._savepoint_seq += 1
name = 'aiomysql_sa_savepoint_%s' % self._savepoint_seq
cur = await self._connection.cursor()
try:
await cur.execute('SAVEPOINT ' + name)
return name
finally:
await cur.close()
async def _rollback_to_savepoint_impl(self, name, parent):
cur = await self._connection.cursor()
try:
await cur.execute('ROLLBACK TO SAVEPOINT ' + name)
finally:
await cur.close()
self._transaction = parent
async def _release_savepoint_impl(self, name, parent):
cur = await self._connection.cursor()
try:
await cur.execute('RELEASE SAVEPOINT ' + name)
finally:
await cur.close()
self._transaction = parent
async def begin_twophase(self, xid=None):
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of
TwoPhaseTransaction, which in addition to the
methods provided by Transaction, also provides a
TwoPhaseTransaction.prepare() method.
xid - the two phase transaction id. If not supplied, a
random id will be generated.
"""
if self._transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress.")
if xid is None:
xid = self._dialect.create_xid()
self._transaction = TwoPhaseTransaction(self, xid)
await self.execute("XA START %s", xid)
return self._transaction
async def _prepare_twophase_impl(self, xid):
await self.execute("XA END '%s'" % xid)
await self.execute("XA PREPARE '%s'" % xid)
async def recover_twophase(self):
"""Return a list of prepared twophase transaction ids."""
result = await self.execute("XA RECOVER;")
return [row[0] for row in result]
async def rollback_prepared(self, xid, *, is_prepared=True):
"""Rollback prepared twophase transaction."""
if not is_prepared:
await self.execute("XA END '%s'" % xid)
await self.execute("XA ROLLBACK '%s'" % xid)
async def commit_prepared(self, xid, *, is_prepared=True):
"""Commit prepared twophase transaction."""
if not is_prepared:
await self.execute("XA END '%s'" % xid)
await self.execute("XA COMMIT '%s'" % xid)
@property
def in_transaction(self):
"""Return True if a transaction is in progress."""
return self._transaction is not None and self._transaction.is_active
async def close(self):
"""Close this SAConnection.
This results in a release of the underlying database
resources, that is, the underlying connection referenced
internally. The underlying connection is typically restored
back to the connection-holding Pool referenced by the Engine
that produced this SAConnection. Any transactional state
present on the underlying connection is also unconditionally
released via calling Transaction.rollback() method.
After .close() is called, the SAConnection is permanently in a
closed state, and will allow no further operations.
"""
if self._connection is None:
return
if self._transaction is not None:
await self._transaction.rollback()
self._transaction = None
# don't close underlying connection, it can be reused by pool
# conn.close()
self._engine.release(self)
self._connection = None
self._engine = None
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.close()
def _distill_params(multiparams, params):
"""Given arguments from the calling form *multiparams, **params,
return a list of bind parameter structures, usually a list of
dictionaries.
In the case of 'raw' execution which accepts positional parameters,
it may be a list of tuples or lists.
"""
if not multiparams:
if params:
return [params]
else:
return []
elif len(multiparams) == 1:
zero = multiparams[0]
if isinstance(zero, (list, tuple)):
if not zero or hasattr(zero[0], '__iter__') and \
not hasattr(zero[0], 'strip'):
# execute(stmt, [{}, {}, {}, ...])
# execute(stmt, [(), (), (), ...])
return zero
else:
# execute(stmt, ("value", "value"))
return [zero]
elif hasattr(zero, 'keys'):
# execute(stmt, {"key":"value"})
return [zero]
else:
# execute(stmt, "value")
return [[zero]]
else:
if (hasattr(multiparams[0], '__iter__') and
not hasattr(multiparams[0], 'strip')):
return multiparams
else:
return [multiparams]
| mit | 8,164,875,323,287,730,000 | 34.047059 | 79 | 0.567237 | false |
gabriel-stan/gestion-tfg | old/controller/ws/tests/old_test_login.py | 1 | 1172 | __author__ = 'tonima'
from django.contrib.auth.models import Group
from django.test import TestCase
import simplejson as json
from rest_framework.test import APIClient
from model.models import Profesor
class TfgServicesTests(TestCase):
def setUp(self):
self.client = APIClient()
self.grupo_profesores = Group.objects.get(name='Profesores')
self.data_alum1 = dict(username='[email protected]', first_name='alumno 1',
last_name='apellido 1 apellido 12', password='75169052')
self.data_login = Profesor(username='[email protected]', first_name='profesor 1',
last_name='apellido 1 apellido 12', departamento='el mas mejor', password='75169052')
self.data_login.save()
self.grupo_profesores.user_set.add(self.data_login)
def test_ws_alumnos_error(self):
        # insert a student (alumno)
res = self.client.login(username='[email protected]', password='75169052')
self.assertEqual(res, True)
res = self.client.post('/alumnos/', self.data_alum1)
resul = json.loads(res.content)
self.assertEqual(resul['status'], True)
| gpl-2.0 | 3,818,691,677,031,575,000 | 38.066667 | 116 | 0.654437 | false |
sebleier/django-alpaca | tests/project/settings.py | 1 | 4735 | # Django settings for project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'project.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'vq^o%2=s(srbps_%coen23cqm3%z&$ti@4vu0red2ngkj_tl0_'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'project.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'project.blog',
'south',
'alpaca',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| bsd-3-clause | 9,094,194,376,573,882,000 | 33.311594 | 122 | 0.682999 | false |
yephper/django | tests/db_typecasts/tests.py | 1 | 2550 | # Unit tests for typecast functions in django.db.backends.util
import datetime
import unittest
from django.db.backends import utils as typecasts
from django.utils import six
TEST_CASES = {
'typecast_date': (
('', None),
(None, None),
('2005-08-11', datetime.date(2005, 8, 11)),
('1990-01-01', datetime.date(1990, 1, 1)),
),
'typecast_time': (
('', None),
(None, None),
('0:00:00', datetime.time(0, 0)),
('0:30:00', datetime.time(0, 30)),
('8:50:00', datetime.time(8, 50)),
('08:50:00', datetime.time(8, 50)),
('12:00:00', datetime.time(12, 00)),
('12:30:00', datetime.time(12, 30)),
('13:00:00', datetime.time(13, 00)),
('23:59:00', datetime.time(23, 59)),
('00:00:12', datetime.time(0, 0, 12)),
('00:00:12.5', datetime.time(0, 0, 12, 500000)),
('7:22:13.312', datetime.time(7, 22, 13, 312000)),
),
'typecast_timestamp': (
('', None),
(None, None),
('2005-08-11 0:00:00', datetime.datetime(2005, 8, 11)),
('2005-08-11 0:30:00', datetime.datetime(2005, 8, 11, 0, 30)),
('2005-08-11 8:50:30', datetime.datetime(2005, 8, 11, 8, 50, 30)),
('2005-08-11 8:50:30.123', datetime.datetime(2005, 8, 11, 8, 50, 30, 123000)),
('2005-08-11 8:50:30.9', datetime.datetime(2005, 8, 11, 8, 50, 30, 900000)),
('2005-08-11 8:50:30.312-05', datetime.datetime(2005, 8, 11, 8, 50, 30, 312000)),
('2005-08-11 8:50:30.312+02', datetime.datetime(2005, 8, 11, 8, 50, 30, 312000)),
# ticket 14453
('2010-10-12 15:29:22.063202', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.063202-03', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.063202+04', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.0632021', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
('2010-10-12 15:29:22.0632029', datetime.datetime(2010, 10, 12, 15, 29, 22, 63202)),
),
}
class DBTypeCasts(unittest.TestCase):
def test_typeCasts(self):
for k, v in six.iteritems(TEST_CASES):
for inpt, expected in v:
got = getattr(typecasts, k)(inpt)
self.assertEqual(
got,
expected,
"In %s: %r doesn't match %r. Got %r instead." % (k, inpt, expected, got)
)
| bsd-3-clause | -423,774,317,204,394,940 | 40.5 | 94 | 0.523529 | false |
pierreg/tensorflow | tensorflow/tools/gcs_test/python/gcs_smoke.py | 1 | 5556 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Smoke test for reading records from GCS to TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.python.lib.io import file_io
flags = tf.app.flags
flags.DEFINE_string("gcs_bucket_url", "",
"The URL to the GCS bucket in which the temporary "
"tfrecord file is to be written and read, e.g., "
"gs://my-gcs-bucket/test-directory")
flags.DEFINE_integer("num_examples", 10, "Number of examples to generate")
FLAGS = flags.FLAGS
def create_examples(num_examples, input_mean):
"""Create ExampleProto's containg data."""
ids = np.arange(num_examples).reshape([num_examples, 1])
inputs = np.random.randn(num_examples, 1) + input_mean
target = inputs - input_mean
examples = []
for row in range(num_examples):
ex = example_pb2.Example()
ex.features.feature["id"].bytes_list.value.append(str(ids[row, 0]))
ex.features.feature["target"].float_list.value.append(target[row, 0])
ex.features.feature["inputs"].float_list.value.append(inputs[row, 0])
examples.append(ex)
return examples
def create_dir_test():
"""Verifies file_io directory handling methods ."""
starttime = int(round(time.time() * 1000))
dir_name = "%s/tf_gcs_test_%s" % (FLAGS.gcs_bucket_url, starttime)
print("Creating dir %s" % dir_name)
file_io.create_dir(dir_name)
elapsed = int(round(time.time() * 1000)) - starttime
print("Created directory in: %d milliseconds" % elapsed)
# Check that the directory exists.
dir_exists = file_io.is_directory(dir_name)
print("%s directory exists: %s" % (dir_name, dir_exists))
# List contents of just created directory.
print("Listing directory %s." % dir_name)
starttime = int(round(time.time() * 1000))
print(file_io.list_directory(dir_name))
elapsed = int(round(time.time() * 1000)) - starttime
print("Listed directory %s in %s milliseconds" % (dir_name, elapsed))
# Delete directory.
print("Deleting directory %s." % dir_name)
starttime = int(round(time.time() * 1000))
file_io.delete_recursively(dir_name)
elapsed = int(round(time.time() * 1000)) - starttime
print("Deleted directory %s in %s milliseconds" % (dir_name, elapsed))
if __name__ == "__main__":
# Sanity check on the GCS bucket URL.
if not FLAGS.gcs_bucket_url or not FLAGS.gcs_bucket_url.startswith("gs://"):
print("ERROR: Invalid GCS bucket URL: \"%s\"" % FLAGS.gcs_bucket_url)
sys.exit(1)
# Generate random tfrecord path name.
input_path = FLAGS.gcs_bucket_url + "/"
input_path += "".join(random.choice("0123456789ABCDEF") for i in range(8))
input_path += ".tfrecord"
print("Using input path: %s" % input_path)
# Verify that writing to the records file in GCS works.
print("\n=== Testing writing and reading of GCS record file... ===")
example_data = create_examples(FLAGS.num_examples, 5)
with tf.python_io.TFRecordWriter(input_path) as hf:
for e in example_data:
hf.write(e.SerializeToString())
print("Data written to: %s" % input_path)
# Verify that reading from the tfrecord file works and that
# tf_record_iterator works.
record_iter = tf.python_io.tf_record_iterator(input_path)
read_count = 0
for r in record_iter:
read_count += 1
print("Read %d records using tf_record_iterator" % read_count)
if read_count != FLAGS.num_examples:
print("FAIL: The number of records read from tf_record_iterator (%d) "
"differs from the expected number (%d)" % (read_count,
FLAGS.num_examples))
sys.exit(1)
# Verify that running the read op in a session works.
print("\n=== Testing TFRecordReader.read op in a session... ===")
with tf.Graph().as_default() as g:
filename_queue = tf.train.string_input_producer([input_path], num_epochs=1)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
sess.run(tf.initialize_local_variables())
tf.train.start_queue_runners()
index = 0
for _ in range(FLAGS.num_examples):
print("Read record: %d" % index)
sess.run(serialized_example)
index += 1
# Reading one more record should trigger an exception.
try:
sess.run(serialized_example)
print("FAIL: Failed to catch the expected OutOfRangeError while "
"reading one more record than is available")
sys.exit(1)
except tf.errors.OutOfRangeError:
print("Successfully caught the expected OutOfRangeError while "
"reading one more record than is available")
create_dir_test()
| apache-2.0 | 7,579,355,250,762,052,000 | 38.404255 | 80 | 0.667207 | false |
mekery/pdfdig | build/scripts-2.7/pdftotext.py | 1 | 2534 | #!/usr/bin/python
'''
@summary: Text's command line script.
Convert pdf to text based on pdftotext.
@author: Micle Bu <[email protected]>
@copyright: Copyright © 2012 Micle Bu
@license: BSD New
@version: pdftotext.py 2012-03-29 Micle Bu
'''
import sys
import string
from pdfdig.pdftext import Text
def main(argv):
import getopt
def usage():
print ('Usage: %s [Option] File ...\n'
'Options:\n'
' -o, --output OUTFILE \n'
' Specify the output file. \n'
' -y, --layout [layout|raw] \n'
' Maintain the layout of the text. \n'
' "layout" preserve the original physical layout of the text. \n'
' "raw" keep the text in content stream order. This is the default setting. \n'
' -f, --first-page INT \n'
' First page to convert. \n'
' -l, --last-page INT \n'
' Last page to convert. \n'
               '  -p, --pageno INT \n'
' Specify a page to convert. \n'
' -h, --help \n'
' Print usage information. \n' % argv[0])
return 100
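    # Illustrative invocations (hypothetical file names), matching the options
    # parsed below:
    #   pdftotext.py -o out.txt -y layout -f 1 -l 10 input.pdf
    #   pdftotext.py -p 3 input.pdf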
try:
(opts, args) = getopt.getopt(argv[1:], 'o:y:f:l:p:h',
['output=','layout=','first-page=','last-page=','pageno=','help'])
except getopt.GetoptError:
return usage()
if not args: return usage()
# option
outfile = None
layout = 'raw'
first = 1
last = 100000
pageno = None
for (k, v) in opts:
if k in ('-o', '--output'): outfile = v
elif k in ('-y', '--layout'): layout = v
elif k in ('-f', '--first-page'): first = string.atoi(v)
elif k in ('-l', '--last-page'): last = string.atoi(v)
elif k in ('-p', '--pageno'): pageno = string.atoi(v)
elif k in ('-h', '--help'): return usage()
# output
if outfile:
f = file(outfile, 'w')
else:
f = sys.stdout
# pdftotext
for pdffile in args:
# pdftext
pc = Text(pdffile, layout=layout)
pages = pc.content
if pageno:
if pageno <= pc.pagecount:
f.write('{0}\n'.format(pages[pageno-1]))
else:
print "Invalide page number!"
else:
f.write('{0}\n'.format(''.join(pages[first-1:last])))
f.close()
return
if __name__ == '__main__': sys.exit(main(sys.argv)) | bsd-3-clause | -4,973,638,659,855,998,000 | 33.256757 | 103 | 0.485793 | false |
zozo123/buildbot | master/buildbot/steps/shellsequence.py | 1 | 4370 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot import config
from buildbot.process import buildstep
from buildbot.status import results
from twisted.internet import defer
class ShellArg(results.ResultComputingConfigMixin):
publicAttributes = (
results.ResultComputingConfigMixin.resultConfig +
["command", "logfile"])
def __init__(self, command=None, logfile=None, **kwargs):
name = self.__class__.__name__
if command is None:
config.error("the 'command' parameter of %s "
"must not be None" % (name,))
self.command = command
self.logfile = logfile
for k, v in kwargs.iteritems():
if k not in self.resultConfig:
config.error("the parameter '%s' is not "
"handled by ShellArg" % (k,))
setattr(self, k, v)
# we don't validate anything yet as we can have renderables.
def validateAttributes(self):
# only make the check if we have a list
if not isinstance(self.command, (str, list)):
config.error("%s is an invalid command, "
"it must be a string or a list" % (self.command,))
if isinstance(self.command, list):
if not all([isinstance(x, str) for x in self.command]):
config.error("%s must only have strings in it" % (self.command,))
runConfParams = [(p_attr, getattr(self, p_attr)) for p_attr in self.resultConfig]
not_bool = [(p_attr, p_val) for (p_attr, p_val) in runConfParams if not isinstance(p_val,
bool)]
if not_bool:
config.error("%r must be booleans" % (not_bool,))
@defer.inlineCallbacks
def getRenderingFor(self, build):
for p_attr in self.publicAttributes:
res = yield build.render(getattr(self, p_attr))
setattr(self, p_attr, res)
defer.returnValue(self)
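# Illustrative sketch (not part of the original file): a ShellSequence step
# built from ShellArg items, e.g. added to a BuildFactory in master.cfg; the
# commands and logfile names here are made up:
#
#     factory.addStep(ShellSequence(commands=[
#         ShellArg(command=['make', 'clean'], logfile='clean'),
#         ShellArg(command=['make', 'all'], logfile='build', haltOnFailure=True),
#     ]))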
class ShellSequence(buildstep.ShellMixin, buildstep.BuildStep):
renderables = ['commands']
def __init__(self, commands=None, **kwargs):
self.commands = commands
kwargs = self.setupShellMixin(kwargs, prohibitArgs=['command'])
buildstep.BuildStep.__init__(self, **kwargs)
def shouldRunTheCommand(self, cmd):
return bool(cmd)
def getFinalState(self):
return self.describe(True)
@defer.inlineCallbacks
def runShellSequence(self, commands):
terminate = False
if commands is None:
defer.returnValue(results.EXCEPTION)
overall_result = results.SUCCESS
for arg in commands:
if not isinstance(arg, ShellArg):
defer.returnValue(results.EXCEPTION)
try:
arg.validateAttributes()
except config.ConfigErrors:
defer.returnValue(results.EXCEPTION)
# handle the command from the arg
command = arg.command
if not self.shouldRunTheCommand(command):
continue
# stick the command in self.command so that describe can use it
self.command = command
cmd = yield self.makeRemoteShellCommand(command=command,
stdioLogName=arg.logfile)
yield self.runCommand(cmd)
overall_result, terminate = results.computeResultAndTermination(
arg, cmd.results(), overall_result)
if terminate:
break
defer.returnValue(overall_result)
def run(self):
return self.runShellSequence(self.commands)
| gpl-3.0 | -9,173,960,222,634,424,000 | 39.091743 | 97 | 0.613272 | false |
openstack/senlin | senlin/objects/requests/actions.py | 1 | 3059 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import versionutils
from senlin.common import consts
from senlin.objects import base
from senlin.objects import fields
@base.SenlinObjectRegistry.register
class ActionCreateRequestBody(base.SenlinObject):
fields = {
'name': fields.NameField(),
'cluster_id': fields.StringField(),
'action': fields.StringField(),
'inputs': fields.JsonField(nullable=True, default={}),
}
@base.SenlinObjectRegistry.register
class ActionCreateRequest(base.SenlinObject):
fields = {
'action': fields.ObjectField('ActionCreateRequestBody')
}
@base.SenlinObjectRegistry.register
class ActionListRequest(base.SenlinObject):
action_name_list = list(consts.CLUSTER_ACTION_NAMES)
action_name_list.extend(list(consts.NODE_ACTION_NAMES))
VERSION = '1.1'
VERSION_MAP = {
'1.14': '1.1'
}
fields = {
'name': fields.ListOfStringsField(nullable=True),
'cluster_id': fields.ListOfStringsField(nullable=True),
'action': fields.ListOfEnumField(
valid_values=action_name_list, nullable=True),
'target': fields.ListOfStringsField(nullable=True),
'status': fields.ListOfEnumField(
valid_values=list(consts.ACTION_STATUSES), nullable=True),
'limit': fields.NonNegativeIntegerField(nullable=True),
'marker': fields.UUIDField(nullable=True),
'sort': fields.SortField(
valid_keys=list(consts.ACTION_SORT_KEYS), nullable=True),
'project_safe': fields.FlexibleBooleanField(default=True)
}
def obj_make_compatible(self, primitive, target_version):
super(ActionListRequest, self).obj_make_compatible(
primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 14):
if 'cluster_id' in primitive['senlin_object.data']:
del primitive['senlin_object.data']['cluster_id']
@base.SenlinObjectRegistry.register
class ActionGetRequest(base.SenlinObject):
fields = {
'identity': fields.StringField(),
}
@base.SenlinObjectRegistry.register
class ActionDeleteRequest(base.SenlinObject):
fields = {
'identity': fields.StringField()
}
@base.SenlinObjectRegistry.register
class ActionUpdateRequest(base.SenlinObject):
fields = {
'identity': fields.StringField(),
'status': fields.StringField(),
'force': fields.BooleanField(default=False)
}
| apache-2.0 | -84,909,250,368,545,440 | 30.864583 | 78 | 0.691402 | false |
Mzero2010/MaxZone | plugin.video.Mzero/core/downloader.py | 1 | 15031 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Mzero 4
# Copyright 2015 [email protected]
# http://blog.tvalacarta.info/plugin-xbmc/Mzero/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of Mzero 4.
#
# Mzero 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mzero 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mzero 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
"""
Downloader class
Downloader(url, path [, filename, headers, resume])
  url : string - URL to download
  path : string - Directory where the download is saved
  filename : [opt] string - File name to save as
  headers : [opt] dict - Headers to use for the download
  resume : [opt] bool - resume a previous download if one exists, defaults to True
methods:
  start() Starts the download
  stop(erase = False) Stops the download; with erase = True the downloaded data is deleted
"""
import sys
import os
import re
import urllib2
import urllib
import urlparse
import mimetypes
import time
from core import filetools
from threading import Thread, Lock
class Downloader:
    # Information:
@property
def state(self):
return self._state
@property
def connections(self):
return len([c for c in self._download_info["parts"] if c["status"] in[self.states.downloading, self.states.connecting]]), self._max_connections
@property
def downloaded(self):
return self.__change_units__(sum([c["current"] - c["start"] for c in self._download_info["parts"]]))
@property
def average_speed(self):
return self.__change_units__(self._average_speed)
@property
def speed(self):
return self.__change_units__(self._speed)
@property
def remaining_time(self):
if self.speed[0] and self._file_size:
t = (self.size[0] - self.downloaded[0]) / self.speed[0]
else:
t = 0
return time.strftime("%H:%M:%S", time.gmtime(t))
@property
def download_url(self):
return self.url
@property
def size(self):
return self.__change_units__(self._file_size)
@property
def progress(self):
if self._file_size:
return float(self.downloaded[0]) * 100 / float(self._file_size)
elif self._state == self.states.completed:
return 100
else:
return 0
@property
def filename(self):
return self._filename
@property
def fullpath(self):
return os.path.abspath(filetools.join(self._path, self._filename))
    # Functions
def start(self):
if self._state == self.states.error: return
self._start_time = time.time() -1
self._state = self.states.downloading
for t in self._threads: t.start()
self._speed_thread.start()
def stop(self, erase=False):
if self._state == self.states.downloading:
            # Stop the download
self._state = self.states.stopped
for t in self._threads:
if t.isAlive(): t.join()
            # Save the resume info at the end of the file
self.file.seek(0,2)
offset = self.file.tell()
self.file.write(str(self._download_info))
self.file.write("%0.16d" % offset)
self.file.close()
if erase: os.remove(filetools.join(self._path, self._filename))
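    # Resume-file layout written by stop(): the download-info dict (as str) is
    # appended to the partial file, followed by a 16-digit decimal offset that
    # points at where the dict starts; __get_download_info__ reads the last 16
    # bytes back, seeks to that offset, evaluates the dict and truncates the
    # file so only payload bytes remain before the download continues.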
def __speed_metter__(self):
self._speed = 0
self._average_speed = 0
downloaded = self._start_downloaded
downloaded2 = self._start_downloaded
t = time.time()
t2 = time.time()
time.sleep(1)
while self.state == self.states.downloading:
self._average_speed = (self.downloaded[0] - self._start_downloaded) / (time.time() - self._start_time)
self._speed = (self.downloaded[0] - self._start_downloaded) / (time.time() - self._start_time)
#self._speed = (self.downloaded[0] - downloaded) / (time.time() -t)
if time.time() -t > 5:
t = t2
downloaded = downloaded2
t2 = time.time()
downloaded2 = self.downloaded[0]
time.sleep(0.5)
    # Internal functions
def __init__(self, url, path, filename=None, headers=[], resume = True, max_connections = 10, part_size = 2097152):
        # Parameters
self._resume = resume
self._path = path
self._filename = filename
self._max_connections = max_connections
self._part_size = part_size
self.states = type('states', (), {"stopped":0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4})
self._block_size = 1024*100
self._state = self.states.stopped
self._write_lock = Lock()
self._download_lock = Lock()
self._headers = {"User-Agent":"Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
self._speed = 0
self._threads = [Thread(target= self.__start_part__) for x in range(self._max_connections)]
self._speed_thread = Thread(target= self.__speed_metter__)
        # Update the request headers
        self._headers.update(dict(headers))
        # Split the additional headers out of the url
        self.__url_to_headers__(url)
        # Get the download info from the server
        self.__get_download_headers__()
self._file_size = int(self.response_headers.get("content-length", "0"))
if not self.response_headers.get("accept-ranges") == "bytes" or self._file_size == 0:
self._max_connections = 1
self._part_size = 0
self._resume = False
        # Get the file name
        self.__get_download_filename__()
        # Open in "a+" mode so the file is created if it does not exist, then in "r+b" mode to be able to seek()
self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+")
self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b")
self.__get_download_info__()
def __url_to_headers__(self, url):
        # Split the url from the additional headers
        self.url = url.split("|")[0]
        # additional headers
if "|" in url:
self._headers.update(dict([[header.split("=")[0],urllib.unquote_plus(header.split("=")[1])] for header in url.split("|")[1].split("&")]))
def __get_download_headers__(self):
for x in range(3):
try:
if not sys.hexversion > 0x0204FFFF:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=self._headers))
conn.fp._sock.close()
else:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=self._headers), timeout=5)
except:
self.response_headers = dict()
self._state = self.states.error
else:
self.response_headers = conn.headers.dict
self._state = self.states.stopped
break
def __get_download_filename__(self):
        # Get file name and extension
if "filename" in self.response_headers.get("content-disposition","") and "attachment" in self.response_headers.get("content-disposition",""):
cd_filename, cd_ext = os.path.splitext(urllib.unquote_plus(re.compile("attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1)))
if "filename" in self.response_headers.get("content-disposition","") and "inline" in self.response_headers.get("content-disposition",""):
cd_filename, cd_ext = os.path.splitext(urllib.unquote_plus(re.compile("inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1)))
else:
cd_filename, cd_ext = "",""
url_filename, url_ext = os.path.splitext(urllib.unquote_plus(filetools.basename(urlparse.urlparse(self.url)[2])))
if self.response_headers.get("content-type","application/octet-stream") <> "application/octet-stream":
mime_ext = mimetypes.guess_extension(self.response_headers.get("content-type"))
else:
mime_ext = ""
        # Pick the most suitable name
if cd_filename:
self.remote_filename = cd_filename
if not self._filename:
self._filename = cd_filename
elif url_filename:
self.remote_filename = url_filename
if not self._filename:
self._filename = url_filename
        # Pick the most suitable extension
if cd_ext:
if not cd_ext in self._filename: self._filename += cd_ext
if self.remote_filename: self.remote_filename += cd_ext
elif mime_ext:
if not mime_ext in self._filename: self._filename += mime_ext
if self.remote_filename: self.remote_filename += mime_ext
elif url_ext:
if not url_ext in self._filename: self._filename += url_ext
if self.remote_filename: self.remote_filename += url_ext
def __change_units__(self, value):
import math
units = ["B", "KB", "MB", "GB"]
if value <= 0:
return 0, 0, units[0]
else:
return value, value / 1024.0 ** int(math.log(value,1024)), units[int(math.log(value,1024))]
def __get_download_info__(self):
        # Resume a download that has the resume info stored at the end of the file
self._download_info = {}
try:
assert self._resume
self.file.seek(-16,2)
offset = int(self.file.read())
self.file.seek(offset)
a = self.file.read()[:-16]
self._download_info = eval(a)
assert self._download_info["size"] == self._file_size
assert self._download_info["url"] == self.url
self.file.seek(offset)
self.file.truncate()
self._start_downloaded = sum([c["current"] - c["start"] for c in self._download_info["parts"]])
self.pending_parts = [x for x, a in enumerate(self._download_info["parts"]) if not a["status"] == self.states.completed]
        # The info does not exist or is not valid, start from scratch
except:
self._download_info["parts"] = []
if self._file_size and self._part_size:
for x in range(0,self._file_size, self._part_size):
end = x + self._part_size -1
if end >= self._file_size: end = self._file_size -1
self._download_info["parts"].append({"start": x, "end": end, "current": x, "status": self.states.stopped})
else:
self._download_info["parts"].append({"start": 0, "end": self._file_size-1, "current": 0, "status": self.states.stopped})
self._download_info["size"] = self._file_size
self._download_info["url"] = self.url
self._start_downloaded = 0
self.pending_parts = [x for x in range(len(self._download_info["parts"]))]
self.file.seek(0)
self.file.truncate()
def __open_connection__(self, start, end):
headers = self._headers.copy()
if not end: end = ""
headers.update({"Range": "bytes=%s-%s" % (start, end)})
if not sys.hexversion > 0x0204FFFF:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=headers))
else:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=headers), timeout=5)
return conn
def __start_part__(self):
while self._state == self.states.downloading:
self._download_lock.acquire()
if len(self.pending_parts):
id = min(self.pending_parts)
self.pending_parts.remove(id)
self._download_lock.release()
            # Otherwise, end the thread
else:
if len([x for x, a in enumerate(self._download_info["parts"]) if a["status"] in [self.states.downloading, self.states.connecting]]) == 0:
self._state = self.states.completed
self.file.close()
self._download_lock.release()
break
            # Check whether the part is already complete; if it is, move on to the next one
if self._download_info["parts"][id]["current"] > self._download_info["parts"][id]["end"] and self._download_info["parts"][id]["end"] > -1:
self._download_info["parts"][id]["status"] = self.states.completed
continue
            # Mark the state as connecting
self._download_info["parts"][id]["status"] = self.states.connecting
            # Try the connection; on error, put the part back in the pending list
try:
connection = self.__open_connection__(self._download_info["parts"][id]["current"], self._download_info["parts"][id]["end"])
except:
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.append(id)
time.sleep(5)
continue
else:
self._download_info["parts"][id]["status"] = self.states.downloading
                # Check that the chunk received is the one we need
if self._download_info["parts"][id]["current"] <> int(connection.info().get("content-range","bytes 0-").split(" ")[1].split("-")[0]):
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.append(id)
continue
while self._state == self.states.downloading:
try:
buffer = connection.read(self._block_size)
except:
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.append(id)
break
else:
if len(buffer):
self._write_lock.acquire()
self.file.seek(self._download_info["parts"][id]["current"])
self.file.write(buffer)
self._download_info["parts"][id]["current"] +=len(buffer)
self._write_lock.release()
else:
connection.fp._sock.close()
self._download_info["parts"][id]["status"] = self.states.completed
break
if self._download_info["parts"][id]["status"] == self.states.downloading:
self._download_info["parts"][id]["status"] = self.states.stopped | gpl-3.0 | 5,840,123,317,750,125,000 | 36.297767 | 198 | 0.586361 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/services/age_range_view_service/client.py | 1 | 18043 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import age_range_view
from google.ads.googleads.v8.services.types import age_range_view_service
from .transports.base import AgeRangeViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AgeRangeViewServiceGrpcTransport
class AgeRangeViewServiceClientMeta(type):
"""Metaclass for the AgeRangeViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AgeRangeViewServiceTransport]]
_transport_registry["grpc"] = AgeRangeViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AgeRangeViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AgeRangeViewServiceClient(metaclass=AgeRangeViewServiceClientMeta):
"""Service to manage age range views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AgeRangeViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
AgeRangeViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def age_range_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified age_range_view string."""
return "customers/{customer_id}/ageRangeViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_age_range_view_path(path: str) -> Dict[str, str]:
"""Parse a age_range_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/ageRangeViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AgeRangeViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the age range view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AgeRangeViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AgeRangeViewServiceTransport):
            # transport is an AgeRangeViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AgeRangeViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_age_range_view(
self,
request: age_range_view_service.GetAgeRangeViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> age_range_view.AgeRangeView:
r"""Returns the requested age range view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAgeRangeViewRequest`):
The request object. Request message for
[AgeRangeViewService.GetAgeRangeView][google.ads.googleads.v8.services.AgeRangeViewService.GetAgeRangeView].
resource_name (:class:`str`):
Required. The resource name of the
age range view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AgeRangeView:
An age range view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a age_range_view_service.GetAgeRangeViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, age_range_view_service.GetAgeRangeViewRequest
):
request = age_range_view_service.GetAgeRangeViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_age_range_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AgeRangeViewServiceClient",)
| apache-2.0 | -6,773,315,631,857,939,000 | 40.28833 | 124 | 0.619853 | false |
goodmami/pydelphin | tests/mrs_Mrs_test.py | 1 | 4294 | # -*- coding: UTF-8 -*-
import pytest
from delphin.mrs.components import (
Pred,
ElementaryPredication as EP,
elementarypredications as eps,
HandleConstraint as Hcons,
hcons,
IndividualConstraint as Icons,
icons,
)
from delphin.mrs.config import (FIRST_NODEID, UNKNOWNSORT)
from delphin.mrs import Mrs
#from delphin.mrs import simplemrs # for convenience in later tests
from delphin.exceptions import XmrsError
sp = Pred.surface
# for convenience
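# check_xmrs below asserts the basic shape of an Xmrs instance: its top/index/xarg
# handles plus the expected counts of EPs, handle constraints, icons, and variables.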
def check_xmrs(x, top, index, xarg, eplen, hconslen, iconslen, varslen):
assert x.top == top
assert x.index == index
assert x.xarg == xarg
assert len(x.eps()) == eplen
assert len(x.hcons()) == hconslen
assert len(x.icons()) == iconslen
assert len(x.variables()) == varslen
class TestMrs():
def test_empty(self):
x = Mrs()
# Mrs view
assert len(eps(x)) == 0
assert len(hcons(x)) == 0
assert len(icons(x)) == 0
# Xmrs members
check_xmrs(x, None, None, None, 0, 0, 0, 0)
def test_single_ep(self):
# basic, one EP, no TOP
x = Mrs(rels=[EP(10, sp('"_rain_v_1_rel"'), 'h1')])
check_xmrs(x, None, None, None, 1, 0, 0, 1)
# variables don't need to be created predictably, but it's nice
# to get the expected values for simple cases
assert x.label(10) == 'h1'
assert x.ep(10).iv == None
# now with ARG0
x = Mrs(rels=[EP(10, sp('"_rain_v_1_rel"'), 'h1', {'ARG0': 'e2'})])
check_xmrs(x, None, None, None, 1, 0, 0, 2)
assert x.label(10) == 'h1'
assert x.ep(10).iv == 'e2'
# now with TOP
x = Mrs(
top='h0',
rels=[EP(10, sp('"_rain_v_1_rel"'), 'h1', {'ARG0': 'e2'})],
hcons=[('h0', 'qeq', 'h1')]
)
check_xmrs(x, 'h0', None, None, 1, 1, 0, 3)
assert x.label(10) == 'h1'
assert x.ep(10).iv == 'e2'
def test_to_dict(self):
assert Mrs().to_dict() == {
'relations': [], 'constraints': [], 'variables': {}
}
x = Mrs(rels=[EP(10, sp('"_rain_v_1_rel"'), 'h1', {'ARG0': 'e2'})])
assert x.to_dict() == {
'relations': [
{'label': 'h1', 'predicate': '_rain_v_1',
'arguments': {'ARG0': 'e2'}}
],
'constraints': [],
'variables': {'h1': {'type': 'h'}, 'e2': {'type': 'e'}}
}
x = Mrs(
top='h0',
rels=[EP(10, sp('"_rain_v_1_rel"'), 'h1', {'ARG0': 'e2'})],
hcons=[('h0', 'qeq', 'h1')],
vars={'e2': {'SF': 'prop', 'TENSE': 'pres'}}
)
assert x.to_dict() == {
'top': 'h0',
'relations': [
{'label': 'h1', 'predicate': '_rain_v_1',
'arguments': {'ARG0': 'e2'}}
],
'constraints': [{'relation': 'qeq', 'high': 'h0', 'low': 'h1'}],
'variables': {
'h0': {'type': 'h'}, 'h1': {'type': 'h'},
'e2': {'type': 'e',
'properties': {'SF': 'prop', 'TENSE': 'pres'}}
}
}
assert x.to_dict(properties=False) == {
'top': 'h0',
'relations': [
{'label': 'h1', 'predicate': '_rain_v_1',
'arguments': {'ARG0': 'e2'}}
],
'constraints': [{'relation': 'qeq', 'high': 'h0', 'low': 'h1'}],
'variables': {
'h0': {'type': 'h'}, 'h1': {'type': 'h'},
'e2': {'type': 'e'}
}
}
def test_from_dict(self):
assert Mrs.from_dict({}) == Mrs()
m1 = Mrs.from_dict({
'relations': [
{'label': 'h1', 'predicate': '_rain_v_1',
'arguments': {'ARG0': 'e2'}}
],
'constraints': [],
'variables': {
'h1': {'type': 'h'},
'e2': {'type': 'e',
'properties': {'SF': 'prop', 'TENSE': 'pres'}}
}
})
m2 = Mrs(
rels=[
EP(FIRST_NODEID, sp('"_rain_v_1_rel"'), 'h1', {'ARG0': 'e2'})
],
vars={'e2': {'SF': 'prop', 'TENSE': 'pres'}}
)
assert m1 == m2
| mit | -6,807,745,565,386,029,000 | 30.807407 | 77 | 0.43293 | false |
fnivek/Pop-a-Gator | scripts/game_sir_interface.py | 1 | 3921 | #!/usr/bin/env python
from bluetooth import *
import sys
from collections import OrderedDict
import argparse
import serial
# Take in command line args
parser = argparse.ArgumentParser(description='Interface with a game sir remote')
parser.add_argument('--pass_to_serial', action='store_true',
help='Pass the bluetooth data over to the serial connection')
parser.add_argument('--print_log', action='store_true',
help='Print the log of all raw data')
parser.add_argument('--device', default="/dev/serial/by-id/usb-PopaGator_Toad-if00",
help='Name of the serial device to pass the bluetooth data to')
cmd_args = parser.parse_args()
# Connect to serial interface
ser = serial.Serial()
if cmd_args.pass_to_serial:
print "Connecting to device:\t" + cmd_args.device + "..."
# Open a serial port
ser = serial.Serial("/dev/serial/by-id/usb-PopaGator_Toad-if00", 115200)
# Send data to start USB OTG
ser.write("start")
print "Connected to device:\t" + cmd_args.device
# Connect to bluetooth
print "Connecting to gamesir over bluetooth..."
services = find_service()
gamepad = None
for svc in services:
if svc['name'] == 'SPP Channel':
gamepad = svc
if gamepad is None:
print "Failed to find gamepad"
sys.exit(0)
protocol = gamepad['protocol']
if protocol == 'RFCOMM':
protocol = RFCOMM
elif protocol == 'L2CAP':
protocol = L2CAP
else:
print "Unkown service!"
sys.exit(0)
sock=BluetoothSocket( protocol )
sock.connect((gamepad['host'], int(gamepad['port'])))
print 'Connected to gamesir over bluetooth'
gamepad_map = OrderedDict()
gamepad_map['LEFT_STICK_LR'] = 2
gamepad_map['LEFT_STICK_UD'] = 3
gamepad_map['RIGHT_STICK_LR'] = 4
gamepad_map['RIGHT_STICK_UD'] = 5
gamepad_map['LEFT_TRIGGER'] = 6
gamepad_map['RIGHT_TRIGGER'] = 7
gamepad_map['ABXY_BUMPERS'] = 8
gamepad_map['SELECT_START_STICKS_?'] = 9 # The left and the right triggers, if depressed far enough, will set a bit
gamepad_map['DPAD'] = 10
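# The values above are byte offsets into the raw report received from the
# controller; the masks and codes below decode the packed button/D-pad bytes.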
button_bitmask = {
'ABXY_BUMPERS' : [('A', 0x1), ('B', 0x2), ('X', 0x8), ('Y', 0x10), ('LEFT_BUMPER', 0x40), ('RIGHT_BUMPER', 0x80)],
'SELECT_START_STICKS_?' : [('SELECT', 0x4), ('START', 0x8), ('LEFT_STICK', 0x20), ('RIGHT_STICK', 0x40) ]#('LEFT_TRIGGER', 0x1), ('RIGHT_TRIGGER', 0x2)]
}
dpad_map = {
0 : 'NOT_PRESSED',
1 : 'UP',
2 : 'UP_RIGHT',
3 : 'RIGHT',
4 : 'DOWN_RIGHT',
5 : 'DOWN',
6 : 'DOWN_LEFT',
7 : 'LEFT',
8 : 'UP_LEFT',
}
raw_data = ''
state = ''
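# raw_data accumulates every received byte; state collects the decoded values
# as brace-delimited rows, apparently intended as C-style initializer lines.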
try:
while True:
data = sock.recv(1024)
if cmd_args.pass_to_serial:
ser.write(data)
print '-----------------'
formated_data = [ord(c) for c in data]
print formated_data
for d in formated_data:
raw_data += str(d)
raw_data += ', '
if len(formated_data) < 10:
print 'Home button'
continue
state += '{0, '
for name, position in gamepad_map.iteritems():
output = name + " : " + str(formated_data[position])
if name in button_bitmask.keys():
for mask in button_bitmask[name]:
value = mask[1] & formated_data[position]
state += str(value)
state += ', '
output += "\n\t" + mask[0] + ": " + ('1' if (value) else '0')
elif name == "DPAD":
state += str(formated_data[position] & 0xF)
output += "\n\tDirection: " + dpad_map[formated_data[position]]
state += ', '
else:
state += str(formated_data[position])
state += ', '
print output
state += '},\n'
finally:
if cmd_args.print_log:
print raw_data
print '\n'
print state
sock.close() | mit | -7,996,774,728,194,808,000 | 28.488722 | 156 | 0.566692 | false |
openbmc/openbmc-test-automation | bin/event_notification_util.py | 1 | 1499 | #!/usr/bin/env python
r"""
See help text for details.
"""
import sys
save_dir_path = sys.path.pop(0)
modules = ['gen_arg', 'gen_print', 'gen_valid', 'event_notification']
for module in modules:
exec("from " + module + " import *")
sys.path.insert(0, save_dir_path)
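# sys.path[0] (this script's directory) is removed while the helper modules are
# wildcard-imported (presumably supplying gen_setup, print_var, etc.), then restored.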
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS]',
description="%(prog)s will subscribe and receive event notifications when "
+ "properties change for the given dbus path.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
parser.add_argument(
'--host',
default='',
help='The host name or IP of the system to subscribe to.')
parser.add_argument(
'--username',
default='root',
help='The username for the host system.')
parser.add_argument(
'--password',
default='',
help='The password for the host system.')
parser.add_argument(
'--dbus_path',
default='',
help='The path to be monitored (e.g. "/xyz/openbmc_project/sensors").')
parser.add_argument(
'--enable_trace',
choices=[0, 1],
default=0,
help='Indicates that trace needs to be enabled.')
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
def main():
gen_setup()
my_event = event_notification(host, username, password)
event_notifications = my_event.subscribe(dbus_path, enable_trace)
print_var(event_notifications, fmt=[no_header(), strip_brackets()])
main()
| apache-2.0 | -7,400,601,391,198,828,000 | 25.298246 | 79 | 0.651101 | false |
dvklopfenstein/biocode | src/pydvkbiology/dnld/uniprot/sprot_sce_geneid2acs.py | 1 | 138364 | """Selected UniProt data saved in Python."""
# Copyright (C) 2014-2019 DV Klopfenstein. All rights reserved
#
# ADAPTED TO PYTHON from the UniProt file:
# ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/complete
DOWNLOADED = "2019_03_07" # UniProt source files were downloaded on this date
# Contains 5912 items for "Saccharomyces cerevisiae ("
# 5,912 keys found
# 6,074 values found
# 5,915 values found (unique)
# pylint: disable=too-many-lines
GENEID2UNIPROTS = {
850289 : {'P25596'},
850290 : {'P25594'},
850291 : {'P25593'},
850292 : {'P0CY08', 'P0CY09'},
850293 : {'P0CY06', 'P0CY07'},
850295 : {'P25379'},
850296 : {'P25591'},
850297 : {'P25588'},
850298 : {'P25586'},
850299 : {'P25585'},
850300 : {'Q96VH5'},
850301 : {'P25375'},
850302 : {'P25584'},
850303 : {'P25583'},
850304 : {'P25582'},
850305 : {'P25580'},
850306 : {'P25579'},
850307 : {'P16550'},
850308 : {'P25577'},
850309 : {'P25380'},
850310 : {'P25576'},
850312 : {'P25574'},
850313 : {'P25573'},
850314 : {'P17967'},
850315 : {'P25572'},
850317 : {'P17709'},
850318 : {'P25569'},
850319 : {'P25568'},
850320 : {'P25567'},
850321 : {'P25370'},
850322 : {'P25373'},
850323 : {'P25369'},
850324 : {'P25566'},
850325 : {'P25344'},
850326 : {'P25368'},
850327 : {'P00815'},
850328 : {'P11709'},
850329 : {'P25367'},
850330 : {'P11710'},
850331 : {'Q96VH4'},
850332 : {'P37261'},
850333 : {'P25376'},
850334 : {'P25389'},
850338 : {'Q96VH3'},
850339 : {'P25383'},
850340 : {'P25384'},
850342 : {'P04173'},
850343 : {'P25374'},
850344 : {'P25559'},
850345 : {'P25558'},
850346 : {'P25555'},
850347 : {'P25554'},
850348 : {'P25605'},
850349 : {'P25604'},
850351 : {'P25587'},
850352 : {'P25578'},
850353 : {'P25565'},
850354 : {'P25560'},
850355 : {'P87012'},
850356 : {'Q96VH2'},
850358 : {'P25342'},
850359 : {'P25348'},
850360 : {'P25349'},
850361 : {'P08679'},
850364 : {'P25354'},
850366 : {'P25333'},
850367 : {'P25343'},
850368 : {'P25613'},
850369 : {'P25371'},
850370 : {'P00560'},
850372 : {'P25615'},
850373 : {'P25616'},
850375 : {'P25617'},
850376 : {'P25618'},
850377 : {'P09007'},
850381 : {'P23060'},
850382 : {'P25362'},
850383 : {'P23059'},
850384 : {'Q9URQ5'},
850385 : {'P25619'},
850387 : {'P25351'},
850388 : {'P25345'},
850389 : {'P32903'},
850391 : {'P25353'},
850392 : {'P25378'},
850394 : {'P25621'},
850395 : {'P32445'},
850396 : {'P25623'},
850397 : {'P06367'},
850398 : {'P25356'},
850399 : {'P25357'},
850400 : {'P25358'},
850401 : {'P25359'},
850402 : {'P25332'},
850403 : {'P25360'},
850405 : {'P25300'},
850406 : {'P0CY08', 'P0CY09'},
850407 : {'P0CY06', 'P0CY07'},
850408 : {'P37265'},
850409 : {'P23255'},
850410 : {'P25361'},
850411 : {'P25625'},
850412 : {'P25381'},
850413 : {'P25626'},
850414 : {'P25627'},
850415 : {'P25628'},
850417 : {'P25630'},
850418 : {'P25631'},
850419 : {'P25632'},
850420 : {'P16120'},
850421 : {'P25355'},
850422 : {'P25635'},
850423 : {'P25637'},
850424 : {'P25638'},
850425 : {'P25639'},
850427 : {'P25337'},
850429 : {'P25364'},
850430 : {'P10862'},
850431 : {'P25365'},
850432 : {'P25641'},
850433 : {'P25334'},
850434 : {'P25642'},
850435 : {'P25382'},
850436 : {'P25390'},
850437 : {'P37262'},
850438 : {'P17261'},
850439 : {'P25659'},
850440 : {'P25644'},
850441 : {'P25646'},
850442 : {'P25648'},
850443 : {'P25649'},
850444 : {'P25372'},
850445 : {'P16649'},
850447 : {'P25651'},
850449 : {'P37263'},
850450 : {'P15891'},
850451 : {'P25653'},
850452 : {'P25654'},
850453 : {'P25341'},
850454 : {'P25336'},
850455 : {'P25655'},
850456 : {'P25656'},
850457 : {'P25366'},
850458 : {'P0CY13'},
850459 : {'P0CY11'},
850462 : {'P25346'},
850463 : {'P25657'},
850464 : {'P25606'},
850465 : {'P25607'},
850466 : {'P25608'},
850468 : {'P25610'},
850469 : {'P25377'},
850470 : {'P25611'},
850471 : {'P25612'},
850476 : {'P0CX99', 'P0CY00', 'P0CY01'},
850477 : {'P43537'},
850478 : {'P43538'},
850479 : {'P43539'},
850480 : {'P43540'},
850482 : {'P43542'},
850483 : {'P0CH63', 'P0CH64'},
850484 : {'P43544'},
850485 : {'P43545'},
850486 : {'P43534'},
850489 : {'P43548'},
850490 : {'P43549'},
850491 : {'P43550'},
850492 : {'P43551'},
850493 : {'P43552'},
850494 : {'P43553'},
850495 : {'P43554'},
850496 : {'P43555'},
850497 : {'P43556'},
850498 : {'P43557'},
850499 : {'P07283'},
850500 : {'P43558'},
850501 : {'P43560'},
850502 : {'P43561'},
850503 : {'P43562'},
850504 : {'P60010'},
850505 : {'P01123'},
850506 : {'P02557'},
850507 : {'P13433'},
850508 : {'P43563'},
850509 : {'P56628'},
850510 : {'P43564'},
850511 : {'P43565'},
850513 : {'P41546'},
850514 : {'P43567'},
850515 : {'P43568'},
850516 : {'P43569'},
850517 : {'P43570'},
850518 : {'D6VTK4'},
850519 : {'P43571'},
850520 : {'P43572'},
850521 : {'P43573'},
850522 : {'P15625'},
850523 : {'P43574'},
850524 : {'P43575'},
850527 : {'P09624'},
850528 : {'P40204'},
850529 : {'P43577'},
850530 : {'P35191'},
850532 : {'P22943'},
850534 : {'P43579'},
850535 : {'P43580'},
850536 : {'P43581'},
850537 : {'P32450'},
850538 : {'P43582'},
850539 : {'P07834'},
850540 : {'P32908'},
850541 : {'P43583'},
850543 : {'P07560'},
850544 : {'P43585'},
850545 : {'P40965'},
850547 : {'P0CX61', 'P0CX62'},
850548 : {'P0CX63', 'P0CX64'},
850549 : {'P25808'},
850550 : {'P31115'},
850551 : {'P43586'},
850552 : {'P34077'},
850553 : {'P43587'},
850554 : {'P43588'},
850555 : {'P43589'},
850556 : {'P43590'},
850558 : {'P43591'},
850559 : {'P43592'},
850561 : {'P43535'},
850562 : {'P43593'},
850563 : {'P43594'},
850565 : {'P43595'},
850566 : {'Q3E838'},
850567 : {'P43596'},
850568 : {'P27466'},
850569 : {'P23337'},
850570 : {'P43597'},
850572 : {'P43598'},
850573 : {'P43599'},
850574 : {'P34756'},
850576 : {'P43600'},
850577 : {'P43601'},
850578 : {'P43602'},
850579 : {'P39684'},
850580 : {'P43603'},
850581 : {'P38635'},
850583 : {'P43604'},
850584 : {'P43605'},
850585 : {'Q00684'},
850587 : {'P43606'},
850588 : {'P39692'},
850589 : {'P38989'},
850590 : {'P0CX45', 'P0CX46'},
850591 : {'P43607'},
850592 : {'P05747'},
850593 : {'P00127'},
850594 : {'P07270'},
850595 : {'P43608'},
850597 : {'P14724'},
850598 : {'P43609'},
850599 : {'P43610'},
850600 : {'P43611'},
850601 : {'P43612'},
850602 : {'P43613'},
850603 : {'P43614'},
850604 : {'P43615'},
850605 : {'P43616'},
850606 : {'P43617'},
850607 : {'P43618'},
850608 : {'P43619'},
850609 : {'P43620'},
850610 : {'P19955'},
850611 : {'P30657'},
850612 : {'P43621'},
850613 : {'P32496'},
850614 : {'P04806'},
850616 : {'P43623'},
850618 : {'P43625'},
850619 : {'Q07878'},
850620 : {'P0CG63'},
850621 : {'Q07872'},
850623 : {'P32523'},
850624 : {'Q07845'},
850625 : {'Q07844'},
850626 : {'Q07843'},
850627 : {'Q07834'},
850628 : {'Q07830'},
850630 : {'Q07825'},
850631 : {'Q07824'},
850632 : {'Q07821'},
850633 : {'P31539'},
850635 : {'Q12370'},
850636 : {'P10592'},
850637 : {'Q12164'},
850638 : {'Q12373'},
850639 : {'P23201'},
850641 : {'P32350'},
850642 : {'Q3E731'},
850643 : {'P04802'},
850645 : {'P14772'},
850646 : {'Q12431'},
850647 : {'Q07807'},
850648 : {'Q07804'},
850649 : {'P33750'},
850650 : {'Q07800'},
850651 : {'Q12287'},
850652 : {'P32892'},
850653 : {'Q07799'},
850654 : {'P41800'},
850655 : {'Q07798'},
850656 : {'P54790'},
850657 : {'Q12369'},
850658 : {'Q07794'},
850659 : {'Q07888'},
850660 : {'Q99208'},
850662 : {'P0CE90', 'P0CE91'},
850663 : {'Q12226'},
850664 : {'Q12525'},
850665 : {'Q12372'},
850666 : {'Q12390'},
850668 : {'Q12198'},
850669 : {'Q12358'},
850670 : {'Q12177'},
850671 : {'Q12235'},
850672 : {'Q12244'},
850673 : {'P0CD98'},
850674 : {'P0CD90'},
850675 : {'Q12473'},
850676 : {'Q03048'},
850677 : {'Q07887'},
850678 : {'P32386'},
850680 : {'P32385'},
850682 : {'P29453'},
850683 : {'P23900'},
850684 : {'Q07879'},
850685 : {'P21801'},
850686 : {'P54861'},
850687 : {'Q07895'},
850688 : {'Q07896'},
850689 : {'Q07897'},
850690 : {'Q07904'},
850691 : {'Q04673'},
850692 : {'Q07084'},
850693 : {'Q07913'},
850694 : {'Q07914'},
850695 : {'Q07915'},
850696 : {'Q07921'},
850698 : {'Q07923'},
850699 : {'Q07927'},
850700 : {'Q07928'},
850701 : {'P07272'},
850702 : {'P43132'},
850703 : {'Q07930'},
850704 : {'Q07938'},
850705 : {'Q12445'},
850706 : {'Q07949'},
850707 : {'Q07950'},
850708 : {'Q07951'},
850709 : {'Q07953'},
850710 : {'Q07959'},
850711 : {'Q07963'},
850712 : {'P39929'},
850713 : {'Q01590'},
850714 : {'P23542'},
850715 : {'P54113'},
850716 : {'P05748'},
850717 : {'Q07967'},
850718 : {'Q07978'},
850719 : {'P32849'},
850720 : {'Q07979'},
850721 : {'Q12078'},
850722 : {'Q07980'},
850724 : {'Q12088'},
850725 : {'Q07986'},
850726 : {'Q07987'},
850727 : {'Q01519'},
850728 : {'P40395'},
850730 : {'Q07988'},
850731 : {'Q07990'},
850732 : {'P22217'},
850733 : {'P06169'},
850734 : {'P46675'},
850735 : {'Q12253'},
850736 : {'Q12209'},
850737 : {'P46654'},
850738 : {'Q12110'},
850739 : {'Q12155'},
850740 : {'Q12035'},
850741 : {'Q12345'},
850742 : {'Q12026'},
850743 : {'Q12202'},
850744 : {'P38915'},
850745 : {'P32353'},
850746 : {'Q12205'},
850747 : {'P37291'},
850748 : {'P54964'},
850749 : {'P15624'},
850750 : {'P05749'},
850752 : {'Q12291'},
850753 : {'Q12144'},
850754 : {'Q99382'},
850755 : {'Q12133'},
850756 : {'P32522'},
850757 : {'Q12247'},
850758 : {'P25039'},
850759 : {'Q07993'},
850760 : {'P19263'},
850761 : {'Q08001'},
850762 : {'Q08003'},
850763 : {'Q08004'},
850764 : {'P41805'},
850766 : {'Q08023'},
850767 : {'P25385'},
850768 : {'P38634'},
850769 : {'Q12396'},
850770 : {'P13181'},
850771 : {'Q12020'},
850772 : {'P32802'},
850773 : {'Q12465'},
850774 : {'Q12509'},
850775 : {'Q12267'},
850776 : {'Q12150'},
850777 : {'P39012'},
850778 : {'P52893'},
850779 : {'P39102'},
850780 : {'Q12393'},
850781 : {'Q12325'},
850782 : {'Q12255'},
850783 : {'Q12418'},
850784 : {'Q12072'},
850785 : {'P13186'},
850786 : {'Q12347'},
850787 : {'P43634'},
850788 : {'Q12385'},
850789 : {'Q3E798'},
850790 : {'Q12452'},
850792 : {'Q12107'},
850793 : {'Q08032'},
850794 : {'Q08045'},
850795 : {'P16658'},
850796 : {'Q12019'},
850797 : {'Q12090'},
850798 : {'Q12259'},
850799 : {'P38013'},
850800 : {'Q12127'},
850803 : {'P32485'},
850805 : {'Q12500'},
850806 : {'Q12102'},
850807 : {'Q12186'},
850808 : {'Q12309'},
850809 : {'Q12354'},
850810 : {'Q99176'},
850811 : {'P32329'},
850812 : {'Q12303'},
850816 : {'Q12138'},
850817 : {'Q12288'},
850818 : {'Q12440'},
850819 : {'Q12395'},
850820 : {'Q12220'},
850821 : {'Q12436'},
850822 : {'P21192'},
850823 : {'Q12208'},
850824 : {'P20485'},
850825 : {'P16467'},
850826 : {'Q12098'},
850827 : {'P47977'},
850828 : {'Q12367'},
850829 : {'Q99271'},
850830 : {'P42900'},
850832 : {'Q02983'},
850833 : {'P09368'},
850835 : {'Q12429'},
850836 : {'Q12168'},
850837 : {'Q12530'},
850838 : {'Q12455'},
850839 : {'P43321'},
850840 : {'P27801'},
850841 : {'Q99296'},
850843 : {'P39015'},
850844 : {'Q12524'},
850845 : {'P54072'},
850846 : {'P52910'},
850847 : {'Q12338'},
850849 : {'Q8TGM6'},
850850 : {'P0CX77', 'P0CX78', 'P0CX79', 'P0CZ17'},
850851 : {'P0CE96', 'P0CE97', 'P0CE98'},
850852 : {'P0CX77', 'P0CX78', 'P0CX79', 'P0CZ17'},
850853 : {'P0C2I5'},
850854 : {'P0CX70', 'P0CX71', 'P0CX72', 'P0CX73'},
850855 : {'P0CX77', 'P0CX78', 'P0CX79', 'P0CZ17'},
850856 : {'P0CE96', 'P0CE97', 'P0CE98'},
850857 : {'P0CX77', 'P0CX78', 'P0CX79', 'P0CZ17'},
850858 : {'P0CE96', 'P0CE97', 'P0CE98'},
850859 : {'Q06235'},
850860 : {'P10507'},
850861 : {'Q06236'},
850862 : {'Q06244'},
850863 : {'Q06245'},
850864 : {'P05759'},
850865 : {'P35200'},
850868 : {'P35181'},
850869 : {'P32469'},
850870 : {'Q06247'},
850871 : {'P41939'},
850872 : {'P33322'},
850873 : {'P48743'},
850874 : {'Q06251'},
850875 : {'P14306'},
850876 : {'Q06252'},
850877 : {'P10659'},
850878 : {'Q06263'},
850879 : {'P09959'},
850880 : {'Q06266'},
850882 : {'P49166'},
850883 : {'Q06287'},
850884 : {'Q06315'},
850885 : {'P33310'},
850886 : {'Q06321'},
850887 : {'Q06324'},
850888 : {'P80667'},
850889 : {'Q05775'},
850890 : {'Q05776'},
850891 : {'Q05777'},
850892 : {'P14743'},
850893 : {'P21304'},
850894 : {'Q12460'},
850896 : {'Q05778'},
850897 : {'P52553'},
850898 : {'Q05779'},
850900 : {'P32335'},
850901 : {'P32344'},
850902 : {'P32339'},
850903 : {'Q05785'},
850904 : {'Q05787'},
850905 : {'Q04491'},
850906 : {'Q05788'},
850907 : {'P24871'},
850908 : {'Q05789'},
850909 : {'P53378'},
850910 : {'Q05790'},
850911 : {'P32791'},
850912 : {'Q05791'},
850914 : {'P53691'},
850915 : {'Q05809'},
850916 : {'Q05812'},
850917 : {'P47818'},
850918 : {'Q05942'},
850919 : {'Q05946'},
850920 : {'P39520'},
850921 : {'Q05947'},
850922 : {'Q05948'},
850923 : {'Q05949'},
850924 : {'Q05955'},
850926 : {'P0CX74', 'P0CX75', 'P0CX76'},
850927 : {'P0C2I6'},
850928 : {'Q05958'},
850930 : {'P19073'},
850933 : {'Q05979'},
850934 : {'P17214'},
850935 : {'P13099'},
850938 : {'Q05998'},
850939 : {'Q06001'},
850940 : {'Q06005'},
850941 : {'P22543'},
850942 : {'Q06538'},
850943 : {'Q06541'},
850944 : {'Q06543'},
850945 : {'Q01662'},
850946 : {'Q06549'},
850947 : {'Q06551'},
850949 : {'Q06554'},
850950 : {'P38623'},
850951 : {'P16521'},
850952 : {'P39931'},
850953 : {'Q06563'},
850955 : {'Q06567'},
850956 : {'Q06568'},
850958 : {'P0CE41'},
850959 : {'P0C2I8'},
850961 : {'Q06146'},
850962 : {'P27472'},
850963 : {'P19882'},
850964 : {'Q06147'},
850965 : {'O13549'},
850966 : {'Q99260'},
850967 : {'Q3E764'},
850968 : {'P14291'},
850969 : {'P0C0X0'},
850970 : {'Q06148'},
850971 : {'Q06149'},
850972 : {'Q06150'},
850973 : {'P22214'},
850974 : {'Q06151'},
850976 : {'Q06152'},
850977 : {'Q06156'},
850979 : {'Q06216'},
850980 : {'P29496'},
850981 : {'Q06217'},
850982 : {'Q06218'},
850983 : {'Q06224'},
850984 : {'Q05854'},
850987 : {'Q05863'},
850988 : {'Q05867'},
850990 : {'Q05871'},
850991 : {'Q05874'},
850992 : {'P29029'},
850993 : {'Q05881'},
850994 : {'P0CX33', 'P0CX34'},
850995 : {'Q02574'},
850996 : {'P46943'},
850997 : {'Q05892'},
850998 : {'P32502'},
850999 : {'P39742'},
851000 : {'P32835'},
851002 : {'Q12349'},
851004 : {'Q05899'},
851005 : {'Q05900'},
851006 : {'Q05902'},
851007 : {'P23776'},
851008 : {'Q05905'},
851010 : {'P06106'},
851013 : {'P19414'},
851014 : {'P37297'},
851015 : {'P52491'},
851016 : {'Q06702'},
851017 : {'Q06703'},
851018 : {'Q06704'},
851019 : {'P04821'},
851021 : {'Q06159'},
851022 : {'P36523'},
851023 : {'Q06160'},
851024 : {'P32457'},
851025 : {'Q06162'},
851027 : {'Q9URQ3'},
851028 : {'Q06163'},
851029 : {'P41697'},
851030 : {'Q06164'},
851032 : {'Q06168'},
851033 : {'P53769'},
851034 : {'Q06169'},
851035 : {'P49167'},
851036 : {'Q06170'},
851037 : {'Q06177'},
851039 : {'Q06178'},
851040 : {'Q02721'},
851041 : {'Q12114'},
851042 : {'P36027'},
851045 : {'P0C0T4'},
851048 : {'P32499'},
851049 : {'Q06132'},
851051 : {'P37370'},
851052 : {'P05317'},
851054 : {'Q06134'},
851055 : {'P38631'},
851056 : {'Q06135'},
851058 : {'P05743'},
851059 : {'Q06137'},
851060 : {'Q06139'},
851061 : {'Q06142'},
851063 : {'Q06143'},
851064 : {'Q06144'},
851065 : {'P49954'},
851066 : {'Q06479'},
851067 : {'P41698'},
851068 : {'P15019'},
851069 : {'P06168'},
851070 : {'Q06485'},
851071 : {'Q06488'},
851072 : {'O13565'},
851073 : {'Q05911'},
851074 : {'Q05919'},
851075 : {'Q05924'},
851076 : {'P23561'},
851077 : {'Q12129'},
851078 : {'Q3E747'},
851079 : {'Q05926'},
851082 : {'Q3E7Y3'},
851083 : {'Q05930'},
851084 : {'Q05931'},
851085 : {'Q05933'},
851086 : {'P51862'},
851087 : {'P40319'},
851088 : {'Q05934'},
851089 : {'Q05937'},
851091 : {'Q12318'},
851092 : {'P09201'},
851095 : {'P32915'},
851096 : {'Q06705'},
851097 : {'Q12748'},
851098 : {'P11325'},
851099 : {'Q12749'},
851100 : {'Q06706'},
851101 : {'Q06707'},
851102 : {'Q06708'},
851103 : {'Q06709'},
851104 : {'P41057'},
851105 : {'Q06010'},
851106 : {'Q06011'},
851107 : {'O13547'},
851108 : {'P18634'},
851109 : {'P18496'},
851110 : {'Q06032'},
851111 : {'P04039'},
851112 : {'P20795'},
851113 : {'P32794'},
851114 : {'P35207'},
851115 : {'P35817'},
851117 : {'Q06053'},
851119 : {'P32432'},
851120 : {'Q06058'},
851121 : {'Q06063'},
851122 : {'P0C2H9'},
851123 : {'Q06070'},
851124 : {'Q06071'},
851125 : {'Q06078'},
851126 : {'Q06685'},
851127 : {'P0C2J4'},
851128 : {'P0C2J3'},
851129 : {'Q06686'},
851130 : {'Q06688'},
851131 : {'Q06689'},
851132 : {'Q06991'},
851133 : {'O13578'},
851135 : {'Q06696'},
851136 : {'Q06697'},
851137 : {'Q06698'},
851139 : {'P20051'},
851140 : {'O13563'},
851141 : {'Q06409'},
851142 : {'Q06410'},
851143 : {'Q06411'},
851145 : {'Q06412'},
851146 : {'Q06417'},
851147 : {'Q06436'},
851148 : {'Q06440'},
851150 : {'Q00416'},
851151 : {'Q06671'},
851152 : {'P50095'},
851153 : {'P23287'},
851154 : {'Q06672'},
851156 : {'Q06673'},
851157 : {'O13577'},
851158 : {'P07991'},
851159 : {'P57743'},
851160 : {'P36517'},
851161 : {'Q12745'},
851162 : {'P33442'},
851163 : {'P06701'},
851164 : {'Q06200'},
851166 : {'Q06201'},
851167 : {'Q06204'},
851168 : {'P32366'},
851169 : {'P05739'},
851170 : {'Q06205'},
851171 : {'P12684'},
851172 : {'P08638'},
851173 : {'P11972'},
851174 : {'Q06208'},
851175 : {'Q06179'},
851177 : {'Q06188'},
851178 : {'Q06199'},
851180 : {'P52919'},
851181 : {'P41733'},
851182 : {'P54007'},
851183 : {'P53427'},
851184 : {'O13556'},
851185 : {'P0CX16', 'P0CX17'},
851187 : {'O13559'},
851189 : {'P0CX20', 'P0CX21', 'P0CX22'},
851190 : {'P11433'},
851191 : {'P13365'},
851192 : {'P06182'},
851193 : {'P00549'},
851194 : {'P39728'},
851195 : {'P39729'},
851196 : {'P39730'},
851197 : {'P39731'},
851199 : {'P28003'},
851200 : {'P28005'},
851201 : {'P28004'},
851202 : {'P39732'},
851203 : {'P31109'},
851204 : {'P32492'},
851205 : {'P39734'},
851206 : {'P39735'},
851207 : {'P39524'},
851208 : {'P10962'},
851209 : {'P07866'},
851210 : {'P31382'},
851211 : {'P31381'},
851212 : {'P31384'},
851213 : {'P31386'},
851214 : {'P31380'},
851215 : {'P31379'},
851216 : {'P31374'},
851217 : {'P31383'},
851218 : {'P31378'},
851219 : {'P31377'},
851220 : {'P31385'},
851221 : {'P31373'},
851222 : {'P31376'},
851223 : {'P18409'},
851224 : {'P18410'},
851225 : {'P18411'},
851226 : {'P39704'},
851229 : {'P0CE92', 'P0CE93', 'Q3E770'},
851230 : {'P39709'},
851232 : {'O13511'},
851233 : {'O13512'},
851234 : {'Q6B2U8'},
851235 : {'P39711'},
851236 : {'P39712'},
851237 : {'P39708'},
851238 : {'P39713'},
851239 : {'P39714'},
851240 : {'P39715'},
851241 : {'P27825'},
851243 : {'P39717'},
851244 : {'P39718'},
851245 : {'Q01574'},
851246 : {'P39719'},
851247 : {'P39720'},
851248 : {'P39721'},
851249 : {'P39722'},
851250 : {'P39723'},
851251 : {'P39724'},
851252 : {'Q3E793'},
851254 : {'P39726'},
851255 : {'Q01329'},
851256 : {'P39727'},
851259 : {'P10591'},
851260 : {'P32471'},
851261 : {'P39702'},
851262 : {'P34111'},
851263 : {'P39705'},
851264 : {'Q05359'},
851265 : {'P39706'},
851266 : {'P22336'},
851267 : {'P39707'},
851268 : {'O13527'},
851269 : {'P0CX57', 'P0CX58'},
851271 : {'P27637'},
851272 : {'P27616'},
851273 : {'P22209'},
851274 : {'P27636'},
851275 : {'P39545'},
851276 : {'D6VPM8'},
851279 : {'P39547'},
851280 : {'P39548'},
851281 : {'P39549'},
851282 : {'P39551'},
851284 : {'P39552'},
851285 : {'P80235'},
851286 : {'P35845'},
851289 : {'P32768'},
851294 : {'P39563'},
851295 : {'P0CX18', 'P0CX19'},
851296 : {'P39564'},
851299 : {'P35842'},
851302 : {'Q07655'},
851303 : {'Q07653'},
851304 : {'Q07651'},
851306 : {'P32797'},
851307 : {'Q07648'},
851308 : {'Q07629'},
851309 : {'Q12328'},
851310 : {'Q12468'},
851311 : {'P33327'},
851312 : {'Q12310'},
851313 : {'Q07623'},
851314 : {'Q02774'},
851315 : {'Q12121'},
851317 : {'P32837'},
851318 : {'Q12046'},
851319 : {'P32495'},
851320 : {'Q12315'},
851321 : {'Q12424'},
851322 : {'P28789'},
851323 : {'Q12443'},
851324 : {'Q07622'},
851325 : {'P36521'},
851326 : {'Q12009'},
851327 : {'P26188'},
851328 : {'Q12407'},
851329 : {'P38988'},
851330 : {'P32448'},
851332 : {'P38968'},
851333 : {'P10870'},
851334 : {'Q12063'},
851335 : {'P11076'},
851336 : {'P0CX84', 'P0CX85'},
851337 : {'P54860'},
851338 : {'Q05672'},
851339 : {'P23595'},
851340 : {'P48568'},
851342 : {'P17255'},
851344 : {'P0CX86', 'P0CX87'},
851345 : {'P48569'},
851346 : {'P48570'},
851347 : {'P01097'},
851348 : {'Q07788'},
851350 : {'P0CD99'},
851351 : {'Q07786'},
851352 : {'P54854'},
851353 : {'Q07748'},
851354 : {'Q07747'},
851356 : {'Q07738'},
851358 : {'P35688'},
851359 : {'Q07732'},
851360 : {'Q07729'},
851361 : {'Q07716'},
851362 : {'P19881'},
851363 : {'Q07688'},
851364 : {'P48365'},
851365 : {'Q07684'},
851366 : {'Q99380'},
851367 : {'Q07660'},
851368 : {'P25044'},
851369 : {'P11484'},
851371 : {'P09932'},
851372 : {'P35197'},
851373 : {'Q07657'},
851374 : {'Q12301'},
851375 : {'Q12477'},
851376 : {'P46681'},
851377 : {'Q12257'},
851378 : {'Q12027'},
851379 : {'Q12476'},
851380 : {'P32891'},
851381 : {'Q12515'},
851383 : {'Q12680'},
851384 : {'P26370'},
851385 : {'P32772'},
851386 : {'P32771'},
851387 : {'P32770'},
851388 : {'Q12055'},
851389 : {'P06100'},
851391 : {'P04819'},
851392 : {'Q12518'},
851394 : {'P39517'},
851395 : {'Q3E774'},
851396 : {'P06784'},
851398 : {'Q12082'},
851399 : {'Q12510'},
851400 : {'P24870'},
851401 : {'Q12175'},
851403 : {'Q12136'},
851404 : {'P25441'},
851406 : {'Q12142'},
851407 : {'Q99207'},
851408 : {'Q12250'},
851409 : {'Q12342'},
851410 : {'P53622'},
851411 : {'Q07589'},
851412 : {'P39078'},
851413 : {'Q07560'},
851414 : {'P48445'},
851415 : {'P04050'},
851416 : {'Q12334'},
851417 : {'Q12300'},
851418 : {'P19146'},
851419 : {'P0CX84', 'P0CX85'},
851420 : {'Q12434'},
851421 : {'P23594'},
851422 : {'P0CX86', 'P0CX87'},
851423 : {'Q12516'},
851424 : {'Q12018'},
851425 : {'Q12122'},
851426 : {'P01098'},
851427 : {'P10622'},
851428 : {'Q07555'},
851429 : {'Q99385'},
851430 : {'P25693'},
851431 : {'P25694'},
851432 : {'Q04344'},
851433 : {'Q07551'},
851434 : {'Q07549'},
851435 : {'P25037'},
851436 : {'Q07541'},
851437 : {'Q07540'},
851439 : {'Q07534'},
851440 : {'Q07533'},
851441 : {'P52891'},
851442 : {'Q07532'},
851444 : {'Q07530'},
851445 : {'Q07528'},
851446 : {'Q07527'},
851447 : {'Q12277'},
851448 : {'Q12513'},
851449 : {'Q12103'},
851450 : {'P06242'},
851451 : {'P40990'},
851452 : {'P07269'},
851453 : {'P43124'},
851454 : {'P43122'},
851455 : {'P43123'},
851456 : {'P15436'},
851457 : {'P39009'},
851458 : {'Q12154'},
851459 : {'Q12191'},
851460 : {'Q12368'},
851461 : {'Q12377'},
851462 : {'P33775'},
851464 : {'P52867'},
851466 : {'P38985'},
851467 : {'Q12229'},
851468 : {'P22007'},
851469 : {'Q12066'},
851470 : {'Q05166'},
851471 : {'Q07508'},
851472 : {'Q07505'},
851473 : {'Q3E7B7'},
851474 : {'Q07500'},
851475 : {'Q07478'},
851476 : {'P0CX51', 'P0CX52'},
851477 : {'Q12690'},
851478 : {'P05318'},
851479 : {'Q07471'},
851480 : {'P50873'},
851481 : {'P32419'},
851482 : {'Q07468'},
851483 : {'Q07458'},
851484 : {'P0C2H8'},
851485 : {'Q07457'},
851486 : {'Q07454'},
851487 : {'Q07451'},
851488 : {'Q07442'},
851491 : {'P14066'},
851492 : {'P07255'},
851493 : {'P21954'},
851494 : {'Q07418'},
851495 : {'P50623'},
851497 : {'Q07395'},
851498 : {'P41058'},
851499 : {'Q07381'},
851500 : {'Q12223'},
851501 : {'P25386'},
851502 : {'Q07379'},
851503 : {'P39678'},
851504 : {'P41940'},
851506 : {'Q07376'},
851507 : {'Q07362'},
851508 : {'P33333'},
851509 : {'P33399'},
851511 : {'P50112'},
851512 : {'Q07351'},
851513 : {'P20604'},
851514 : {'Q12408'},
851515 : {'O75012'},
851516 : {'P38913'},
851517 : {'P10849'},
851518 : {'Q07350'},
851520 : {'P06700'},
851521 : {'P12945'},
851522 : {'Q12459'},
851524 : {'Q12140'},
851525 : {'Q12069'},
851527 : {'Q12361'},
851529 : {'Q12093'},
851530 : {'Q12389'},
851531 : {'P19736'},
851532 : {'P32381'},
851533 : {'P54199'},
851535 : {'Q07349'},
851536 : {'Q12100'},
851537 : {'P52290'},
851539 : {'Q00055'},
851541 : {'Q12008'},
851542 : {'Q03465'},
851543 : {'Q12451'},
851544 : {'Q12403'},
851545 : {'P06243'},
851547 : {'Q99190'},
851548 : {'P15646'},
851549 : {'P32828'},
851550 : {'Q12489'},
851551 : {'Q12438'},
851553 : {'Q12210'},
851554 : {'Q12157'},
851557 : {'P40327'},
851558 : {'P35182'},
851559 : {'Q12124'},
851560 : {'Q12165'},
851561 : {'Q12158'},
851562 : {'Q03435'},
851563 : {'Q03441'},
851564 : {'P32356'},
851565 : {'P41920'},
851566 : {'Q03446'},
851567 : {'P25301'},
851568 : {'P41910'},
851569 : {'P40317'},
851570 : {'P00912'},
851572 : {'P13045'},
851574 : {'P32568'},
851575 : {'P49626'},
851576 : {'Q12488'},
851577 : {'Q99359'},
851579 : {'Q12248'},
851580 : {'Q12494'},
851581 : {'Q12185'},
851582 : {'P48015'},
851583 : {'Q12084'},
851584 : {'Q12099'},
851585 : {'Q12421'},
851587 : {'P07284'},
851589 : {'P0CX47', 'P0CX48'},
851590 : {'Q12457'},
851591 : {'Q12071'},
851592 : {'Q00816'},
851594 : {'Q12021'},
851595 : {'Q04341'},
851596 : {'Q12335'},
851597 : {'Q12117'},
851598 : {'P40971'},
851599 : {'Q12472'},
851600 : {'Q12392'},
851602 : {'P0C289'},
851604 : {'Q6Q5X2'},
851605 : {'P14843'},
851606 : {'P28817'},
851607 : {'P15180'},
851608 : {'Q12691'},
851609 : {'Q01896'},
851610 : {'P13587'},
851611 : {'Q03201'},
851612 : {'Q03205'},
851613 : {'Q03125'},
851614 : {'P11353'},
851615 : {'Q04307'},
851616 : {'P41815'},
851617 : {'P32347'},
851618 : {'Q04311'},
851620 : {'P00942'},
851621 : {'Q99288'},
851623 : {'P32325'},
851624 : {'P14682'},
851625 : {'Q12355'},
851626 : {'Q12025'},
851627 : {'Q99220'},
851628 : {'P54857'},
851631 : {'P15732'},
851632 : {'Q12176'},
851633 : {'Q12298'},
851634 : {'P40970'},
851635 : {'Q12156'},
851636 : {'P05756'},
851637 : {'Q12167'},
851638 : {'Q12378'},
851639 : {'Q12454'},
851640 : {'P54858'},
851641 : {'P32571'},
851642 : {'Q12497'},
851643 : {'Q12447'},
851644 : {'P38954'},
851645 : {'P38956'},
851646 : {'P31688'},
851647 : {'P32345'},
851648 : {'P38953'},
851649 : {'Q01589'},
851650 : {'P38957'},
851651 : {'P38958'},
851652 : {'Q3E7C1'},
851653 : {'P38959'},
851654 : {'P32896'},
851655 : {'P38960'},
851656 : {'P38961'},
851657 : {'P38962'},
851658 : {'P33304'},
851659 : {'P35179'},
851660 : {'P35178'},
851661 : {'Q02775'},
851663 : {'P38966'},
851664 : {'Q03193'},
851665 : {'Q03195'},
851666 : {'P52490'},
851667 : {'Q12675'},
851670 : {'Q03833'},
851671 : {'Q03834'},
851672 : {'Q03835'},
851674 : {'Q03855'},
851675 : {'Q03856'},
851676 : {'P34730'},
851677 : {'Q03860'},
851678 : {'Q03862'},
851680 : {'P32917'},
851681 : {'Q03868'},
851682 : {'Q12116'},
851683 : {'Q04549'},
851685 : {'Q04562'},
851686 : {'P46944'},
851687 : {'Q04585'},
851688 : {'O13329'},
851690 : {'P52892'},
851691 : {'P40316'},
851692 : {'Q04598'},
851693 : {'Q04597'},
851694 : {'Q04599'},
851695 : {'Q04600'},
851696 : {'Q04601'},
851697 : {'Q04602'},
851698 : {'P15565'},
851699 : {'Q04603'},
851700 : {'P13185'},
851701 : {'P26798'},
851702 : {'Q04608'},
851703 : {'Q04623'},
851704 : {'Q04629'},
851705 : {'P08566'},
851706 : {'Q03897'},
851707 : {'P32599'},
851708 : {'Q03898'},
851709 : {'Q03899'},
851710 : {'Q03900'},
851713 : {'P39109'},
851714 : {'P16664'},
851716 : {'P17629'},
851717 : {'Q03919'},
851718 : {'Q03920'},
851719 : {'Q03921'},
851720 : {'P39108'},
851721 : {'P22470'},
851722 : {'P53379'},
851723 : {'Q03761'},
851724 : {'P08153'},
851725 : {'Q03764'},
851726 : {'P19262'},
851727 : {'Q00402'},
851729 : {'P47976'},
851730 : {'Q03768'},
851731 : {'Q03769'},
851733 : {'P14832'},
851734 : {'P50106'},
851736 : {'P13663'},
851737 : {'P46674'},
851738 : {'Q03770'},
851739 : {'Q03771'},
851740 : {'Q12163'},
851741 : {'Q03772'},
851742 : {'P30619'},
851743 : {'Q03774'},
851744 : {'P89102'},
851745 : {'Q12030'},
851746 : {'P06101'},
851747 : {'Q12427'},
851748 : {'P11075'},
851750 : {'Q03964'},
851751 : {'Q12329'},
851752 : {'P05453'},
851753 : {'P07250'},
851754 : {'Q03973'},
851755 : {'Q03976'},
851756 : {'P32494'},
851757 : {'P21734'},
851758 : {'P37298'},
851759 : {'Q03981'},
851760 : {'Q03983'},
851761 : {'Q04002'},
851762 : {'Q04003'},
851763 : {'P40986'},
851764 : {'Q04004'},
851765 : {'Q04005'},
851766 : {'Q04006'},
851767 : {'Q04007'},
851768 : {'P39079'},
851770 : {'P22213'},
851771 : {'Q03940'},
851772 : {'P53688'},
851774 : {'P49686'},
851775 : {'P15424'},
851776 : {'P42073'},
851777 : {'Q03941'},
851778 : {'P14905'},
851779 : {'Q03942'},
851781 : {'Q03944'},
851782 : {'Q03954'},
851784 : {'Q03956'},
851785 : {'O13525'},
851786 : {'Q03455'},
851787 : {'Q03466'},
851788 : {'P39001'},
851789 : {'P38994'},
851790 : {'Q03482'},
851792 : {'Q03483'},
851793 : {'Q03494'},
851794 : {'Q99231'},
851795 : {'Q12441'},
851797 : {'P32501'},
851798 : {'P12612'},
851799 : {'Q12151'},
851800 : {'Q12449'},
851802 : {'P07248'},
851803 : {'P14737'},
851804 : {'Q04921'},
851805 : {'Q04922'},
851807 : {'Q04924'},
851808 : {'Q04925'},
851809 : {'Q04930'},
851810 : {'P02293'},
851811 : {'P04911'},
851812 : {'P07170'},
851813 : {'P11978'},
851814 : {'P39081'},
851815 : {'Q04934'},
851817 : {'Q04935'},
851818 : {'P09950'},
851819 : {'Q04947'},
851820 : {'P49367'},
851821 : {'Q03776'},
851822 : {'Q03778'},
851823 : {'P36519'},
851824 : {'P41810'},
851825 : {'Q03780'},
851827 : {'Q03782'},
851829 : {'P22580'},
851830 : {'P23394'},
851831 : {'P35056'},
851832 : {'P50108'},
851833 : {'Q03784'},
851834 : {'Q03785'},
851835 : {'Q03786'},
851836 : {'Q03787'},
851838 : {'P37304'},
851839 : {'P40314'},
851840 : {'Q12041'},
851841 : {'P38907'},
851842 : {'Q12508'},
851843 : {'P15202'},
851844 : {'Q12504'},
851845 : {'P33416'},
851846 : {'Q03935'},
851847 : {'Q12379'},
851848 : {'P52911'},
851850 : {'Q99303'},
851851 : {'Q07791'},
851852 : {'Q07793'},
851853 : {'O74302'},
851855 : {'Q12331'},
851856 : {'Q12086'},
851857 : {'P39010'},
851858 : {'Q05568'},
851859 : {'Q05580'},
851860 : {'Q05583'},
851861 : {'P04803'},
851862 : {'P38995'},
851865 : {'Q05584'},
851866 : {'Q05610'},
851868 : {'Q05611'},
851869 : {'P87284'},
851870 : {'P35198'},
851873 : {'Q05635'},
851874 : {'Q05636'},
851875 : {'Q05637'},
851876 : {'Q05648'},
851877 : {'P15442'},
851878 : {'Q05521'},
851879 : {'P31111'},
851880 : {'Q05530'},
851881 : {'Q05533'},
851882 : {'Q05541'},
851884 : {'Q05543'},
851885 : {'Q05549'},
851886 : {'P32916'},
851887 : {'P24276'},
851888 : {'Q05567'},
851889 : {'Q06629'},
851890 : {'Q06630'},
851891 : {'P38992'},
851892 : {'P09457'},
851893 : {'Q06631'},
851894 : {'P32264'},
851895 : {'Q06632'},
851896 : {'Q06636'},
851897 : {'Q06639'},
851898 : {'P35176'},
851899 : {'P49775'},
851900 : {'Q06640'},
851902 : {'Q06644'},
851903 : {'P47822'},
851904 : {'Q06648'},
851905 : {'P46676'},
851906 : {'P32776'},
851907 : {'Q12153'},
851908 : {'Q06651'},
851909 : {'Q06665'},
851910 : {'Q06667'},
851911 : {'Q06668'},
851913 : {'P0CX65', 'P0CX66', 'P0CX67', 'P0CX68', 'P0CX69'},
851914 : {'P0C2I2'},
851915 : {'Q06674'},
851916 : {'Q06675'},
851917 : {'Q06676'},
851918 : {'Q06677'},
851919 : {'P69851'},
851920 : {'P38986'},
851921 : {'Q06678'},
851922 : {'P81449'},
851923 : {'P32609'},
851924 : {'Q06679'},
851925 : {'Q06680'},
851926 : {'Q06681'},
851928 : {'P52286'},
851929 : {'P28795'},
851930 : {'Q06682'},
851931 : {'P49018'},
851932 : {'Q06683'},
851933 : {'Q05468'},
851934 : {'Q05471'},
851935 : {'P52918'},
851936 : {'Q05473'},
851937 : {'P21771'},
851938 : {'Q05497'},
851939 : {'Q05498'},
851942 : {'Q05506'},
851943 : {'P39004'},
851944 : {'P39003'},
851946 : {'P32466'},
851947 : {'Q05515'},
851948 : {'P10662'},
851949 : {'Q05518'},
851950 : {'Q06325'},
851952 : {'P50273'},
851953 : {'P42223'},
851954 : {'Q06328'},
851955 : {'P29509'},
851956 : {'P07285'},
851957 : {'P32380'},
851959 : {'Q06333'},
851960 : {'Q06336'},
851962 : {'Q06337'},
851963 : {'Q06338'},
851964 : {'Q06339'},
851965 : {'Q06340'},
851967 : {'O94742'},
851968 : {'P40968'},
851969 : {'Q06344'},
851970 : {'P0CX70', 'P0CX71', 'P0CX72', 'P0CX73'},
851971 : {'P0C2I3'},
851972 : {'P87287'},
851973 : {'Q06346'},
851974 : {'Q12458'},
851975 : {'P33301'},
851976 : {'Q06349'},
851977 : {'Q06350'},
851978 : {'Q06385'},
851979 : {'Q06389'},
851980 : {'Q06390'},
851981 : {'P32839'},
851982 : {'P48360'},
851983 : {'Q06405'},
851984 : {'Q06406'},
851985 : {'Q06407'},
851986 : {'Q3E785'},
851987 : {'Q06408'},
851988 : {'Q12159'},
851989 : {'Q3E6R5'},
851990 : {'P02400'},
851991 : {'Q12493'},
851992 : {'Q12359'},
851993 : {'P32324'},
851994 : {'Q04149'},
851995 : {'Q04162'},
851996 : {'P39743'},
851997 : {'P17121'},
851998 : {'P52488'},
852000 : {'Q04170'},
852001 : {'P06844'},
852002 : {'Q04172'},
852003 : {'P33298'},
852004 : {'Q04175'},
852006 : {'Q92317'},
852007 : {'Q04177'},
852008 : {'Q04178'},
852009 : {'Q04179'},
852011 : {'P21595'},
852012 : {'P21623'},
852013 : {'P34087'},
852014 : {'P32387'},
852015 : {'Q04182'},
852016 : {'Q04183'},
852017 : {'P04161'},
852018 : {'Q04195'},
852019 : {'P32584'},
852020 : {'Q12743'},
852021 : {'Q04031'},
852023 : {'P16151'},
852024 : {'Q04033'},
852025 : {'Q04048'},
852026 : {'P0CX53', 'P0CX54'},
852028 : {'Q04049'},
852030 : {'P41809'},
852031 : {'Q04052'},
852032 : {'P32578'},
852033 : {'P24813'},
852034 : {'Q02647'},
852035 : {'Q04053'},
852037 : {'Q04062'},
852038 : {'Q04066'},
852039 : {'Q04067'},
852041 : {'P32898'},
852042 : {'Q01560'},
852044 : {'Q04080'},
852045 : {'Q04081'},
852046 : {'P33329'},
852047 : {'Q04082'},
852048 : {'Q04083'},
852049 : {'Q04087'},
852050 : {'Q04089'},
852051 : {'P36973'},
852053 : {'P38931'},
852054 : {'Q04093'},
852057 : {'Q04110'},
852058 : {'P14127'},
852059 : {'Q02336'},
852060 : {'Q02354'},
852061 : {'P0CX55', 'P0CX56'},
852062 : {'Q04116'},
852063 : {'Q04119'},
852064 : {'Q04120'},
852065 : {'P15454'},
852066 : {'Q04121'},
852068 : {'Q03280'},
852069 : {'Q03281'},
852070 : {'Q03289'},
852071 : {'Q03290'},
852072 : {'P34165'},
852073 : {'P36527'},
852074 : {'Q00947'},
852075 : {'P38904'},
852076 : {'Q03305'},
852077 : {'Q03306'},
852079 : {'Q03322'},
852080 : {'Q03323'},
852081 : {'Q03327'},
852082 : {'P0C2H7'},
852083 : {'Q03337'},
852084 : {'Q03338'},
852086 : {'Q03361'},
852087 : {'Q03362'},
852088 : {'P06782'},
852089 : {'P40993'},
852090 : {'Q03370'},
852091 : {'Q03373'},
852092 : {'P11491'},
852093 : {'Q03375'},
852094 : {'P27809'},
852095 : {'P39904'},
852096 : {'Q03388'},
852097 : {'Q03390'},
852098 : {'Q99258'},
852099 : {'P40960'},
852100 : {'Q03406'},
852101 : {'Q03407'},
852102 : {'Q03419'},
852104 : {'Q03429'},
852105 : {'Q03430'},
852106 : {'P23643'},
852107 : {'Q04373'},
852108 : {'P30605'},
852109 : {'P28791'},
852110 : {'Q04377'},
852111 : {'P51402'},
852112 : {'Q04383'},
852113 : {'P19358'},
852114 : {'Q04396'},
852115 : {'Q04398'},
852116 : {'P50896'},
852117 : {'Q04399'},
852119 : {'Q12263'},
852121 : {'P48813'},
852122 : {'Q12306'},
852123 : {'Q04401'},
852124 : {'P17695'},
852125 : {'Q04406'},
852126 : {'Q04408'},
852127 : {'Q12034'},
852128 : {'Q04409'},
852129 : {'Q04410'},
852130 : {'P32474'},
852131 : {'P32472'},
852133 : {'Q04411'},
852134 : {'P08459'},
852135 : {'P08458'},
852136 : {'Q04412'},
852138 : {'P56508'},
852139 : {'Q04418'},
852141 : {'Q04429'},
852142 : {'P00128'},
852143 : {'P22108'},
852144 : {'Q04430'},
852145 : {'Q04431'},
852146 : {'Q04432'},
852147 : {'Q04433'},
852149 : {'P39932'},
852150 : {'P33751'},
852152 : {'Q03034'},
852153 : {'Q03036'},
852154 : {'Q03049'},
852155 : {'Q03050'},
852158 : {'P0CX20', 'P0CX21', 'P0CX22'},
852159 : {'Q7M4S9'},
852160 : {'A0A023PXF5', 'Q3E7Y4'},
852161 : {'Q3E7Y5'},
852163 : {'Q3E770'},
852167 : {'P38162'},
852168 : {'P38163'},
852169 : {'P24583'},
852170 : {'P38164'},
852171 : {'P38165'},
852172 : {'P38166'},
852173 : {'P38167'},
852174 : {'Q12260'},
852175 : {'Q12491'},
852177 : {'P07251'},
852179 : {'P38169'},
852180 : {'P38170'},
852181 : {'P38172'},
852184 : {'P25046'},
852185 : {'P38061'},
852186 : {'Q6Q595'},
852187 : {'P38174'},
852188 : {'P38175'},
852189 : {'P38176'},
852190 : {'P38110'},
852191 : {'P0CX41', 'P0CX42'},
852192 : {'P38177'},
852193 : {'P38041'},
852194 : {'P38042'},
852196 : {'P38179'},
852197 : {'P38180'},
852198 : {'P33893'},
852199 : {'P38181'},
852200 : {'P38182'},
852202 : {'P09436'},
852203 : {'P09435'},
852205 : {'P32357'},
852206 : {'P0CX39', 'P0CX40'},
852207 : {'Q3E840'},
852208 : {'P38185'},
852209 : {'P35183'},
852211 : {'P38063'},
852212 : {'P38187'},
852214 : {'P34228'},
852215 : {'P34227'},
852216 : {'P28742'},
852218 : {'P34226'},
852219 : {'P34225'},
852220 : {'Q3E7A4'},
852221 : {'P34224'},
852222 : {'P34223'},
852223 : {'P34222'},
852224 : {'P34221'},
852225 : {'P34220'},
852226 : {'P34219'},
852228 : {'P34218'},
852229 : {'P34217'},
852230 : {'P32602'},
852231 : {'P38191'},
852233 : {'P34216'},
852234 : {'P38193'},
852235 : {'P07256'},
852236 : {'P38194'},
852237 : {'P38195'},
852238 : {'P38196'},
852239 : {'P23724'},
852240 : {'P18414'},
852241 : {'P28274'},
852242 : {'P38064'},
852243 : {'P38065'},
852244 : {'P38197'},
852245 : {'P38121'},
852246 : {'P38198'},
852247 : {'P38066'},
852248 : {'P38199'},
852249 : {'P38200'},
852250 : {'P18239'},
852251 : {'Q3E756'},
852252 : {'P38201'},
852253 : {'P38202'},
852254 : {'P0CX82', 'P0CX83'},
852255 : {'P38203'},
852256 : {'P38204'},
852257 : {'P38205'},
852258 : {'P29469'},
852259 : {'P36775'},
852260 : {'P13434'},
852261 : {'P38206'},
852262 : {'P38207'},
852263 : {'P38208'},
852264 : {'P32319'},
852265 : {'P16892'},
852266 : {'P32316'},
852269 : {'P32786'},
852270 : {'P32785'},
852271 : {'P32784'},
852273 : {'P32788'},
852274 : {'P32789'},
852275 : {'P32479'},
852276 : {'P32790'},
852277 : {'P38210'},
852278 : {'P33200'},
852279 : {'Q12266'},
852280 : {'Q12490'},
852282 : {'P35194'},
852283 : {'P04912'},
852284 : {'P02294'},
852285 : {'P35195'},
852286 : {'P35172'},
852287 : {'P35196'},
852288 : {'P18900'},
852289 : {'P38211'},
852290 : {'P38212'},
852291 : {'P38067'},
852292 : {'P38213'},
852293 : {'P38124'},
852294 : {'P02309'},
852295 : {'P61830'},
852296 : {'P00817'},
852298 : {'Q12217'},
852299 : {'Q12193'},
852300 : {'P38215'},
852302 : {'P38068'},
852303 : {'P38069'},
852304 : {'P38216'},
852305 : {'P38217'},
852306 : {'P08431'},
852307 : {'P04397'},
852308 : {'P04385'},
852309 : {'P05316'},
852310 : {'P38218'},
852311 : {'P29465'},
852312 : {'P38072'},
852313 : {'P38219'},
852314 : {'P38071'},
852315 : {'P38220'},
852316 : {'P38070'},
852317 : {'P38221'},
852318 : {'P38222'},
852319 : {'P10664'},
852320 : {'P38223'},
852321 : {'P38073'},
852322 : {'P38074'},
852323 : {'P38075'},
852324 : {'P35206'},
852325 : {'P23833'},
852326 : {'P14180'},
852327 : {'P38077'},
852328 : {'P38224'},
852329 : {'P38225'},
852330 : {'P38226'},
852331 : {'P38227'},
852332 : {'P38228'},
852334 : {'P38229'},
852335 : {'P38230'},
852336 : {'P38231'},
852337 : {'P0CX47', 'P0CX48'},
852338 : {'P21538'},
852340 : {'P38232'},
852341 : {'P38234'},
852342 : {'P38235'},
852343 : {'P38079'},
852344 : {'P19735'},
852346 : {'P38081'},
852348 : {'P38236'},
852349 : {'P38237'},
852350 : {'Q3E790'},
852351 : {'P38080'},
852352 : {'P32833'},
852353 : {'P38238'},
852354 : {'P38239'},
852356 : {'P38083'},
852357 : {'P38241'},
852358 : {'P38082'},
852359 : {'P27654'},
852360 : {'P38084'},
852361 : {'P38085'},
852362 : {'P38242'},
852363 : {'P38243'},
852364 : {'P15992'},
852365 : {'P38086'},
852366 : {'P38244'},
852368 : {'P38246'},
852369 : {'P38247'},
852370 : {'P38248'},
852371 : {'P38249'},
852372 : {'P18759'},
852373 : {'P35177'},
852376 : {'P15731'},
852377 : {'P18412'},
852378 : {'P09440'},
852379 : {'P0CX82', 'P0CX83'},
852380 : {'P18238'},
852381 : {'O43137'},
852382 : {'P38250'},
852383 : {'P38251'},
852385 : {'P15873'},
852386 : {'P11633'},
852387 : {'P38253'},
852388 : {'P32830'},
852389 : {'P24031'},
852390 : {'P00635'},
852391 : {'P38254'},
852392 : {'P38255'},
852393 : {'P38256'},
852394 : {'P22219'},
852395 : {'P38257'},
852397 : {'P38260'},
852398 : {'P38261'},
852399 : {'P38262'},
852401 : {'P38087'},
852402 : {'P38263'},
852403 : {'P38264'},
852404 : {'P38265'},
852405 : {'P38266'},
852406 : {'P06787'},
852407 : {'P16661'},
852408 : {'Q01976'},
852410 : {'P14922'},
852411 : {'P31244'},
852412 : {'P07702'},
852414 : {'P33315'},
852415 : {'P02994'},
852416 : {'P32605'},
852417 : {'P07253'},
852418 : {'P38088'},
852419 : {'P36531'},
852421 : {'P32367'},
852422 : {'P38089'},
852423 : {'Q00764'},
852424 : {'P16140'},
852425 : {'P38270'},
852426 : {'P38271'},
852427 : {'P38272'},
852428 : {'P38273'},
852429 : {'P38090'},
852431 : {'P38274'},
852432 : {'P20486'},
852433 : {'P38111'},
852434 : {'P38276'},
852435 : {'P38277'},
852436 : {'P38109'},
852437 : {'P18963'},
852438 : {'P38278'},
852439 : {'P38112'},
852440 : {'P12385'},
852442 : {'P38113'},
852443 : {'P38120'},
852444 : {'P38279'},
852445 : {'P38280'},
852446 : {'P38115'},
852447 : {'P38114'},
852448 : {'P38281'},
852449 : {'P38282'},
852450 : {'P33312'},
852451 : {'P20434'},
852452 : {'P33313'},
852453 : {'P38283'},
852454 : {'P38284'},
852455 : {'P38285'},
852456 : {'P38286'},
852457 : {'P00546'},
852458 : {'P38287'},
852459 : {'P38288'},
852460 : {'P38374'},
852461 : {'P38289'},
852462 : {'P38116'},
852463 : {'P38290'},
852464 : {'P20049'},
852465 : {'P38291'},
852466 : {'P38292'},
852467 : {'P32590'},
852468 : {'P33755'},
852469 : {'P33754'},
852470 : {'P32909'},
852471 : {'P38293'},
852472 : {'P38123'},
852474 : {'P38122'},
852476 : {'P38295'},
852477 : {'P38297'},
852478 : {'P38125'},
852479 : {'P0CX37', 'P0CX38'},
852480 : {'P38128'},
852481 : {'P38298'},
852482 : {'P38299'},
852483 : {'P38300'},
852484 : {'P38126'},
852485 : {'P38301'},
852486 : {'P38302'},
852487 : {'P05755'},
852489 : {'Q02753'},
852491 : {'P38127'},
852492 : {'P38304'},
852493 : {'P38305'},
852494 : {'P13712'},
852495 : {'P12709'},
852496 : {'P38306'},
852497 : {'P38129'},
852498 : {'P38131'},
852499 : {'P29366'},
852500 : {'P38307'},
852501 : {'P38132'},
852502 : {'P38308'},
852503 : {'P38139'},
852504 : {'P38130'},
852506 : {'P38310'},
852507 : {'P32528'},
852511 : {'P38312'},
852512 : {'P38313'},
852513 : {'P32831'},
852514 : {'P15807'},
852515 : {'P38314'},
852516 : {'Q01448'},
852517 : {'P38315'},
852518 : {'P38316'},
852519 : {'P32327'},
852520 : {'P38317'},
852521 : {'P38318'},
852522 : {'P32473'},
852523 : {'P38137'},
852525 : {'P38319'},
852526 : {'P38321'},
852528 : {'P38323'},
852529 : {'P38324'},
852530 : {'P38138'},
852531 : {'P38325'},
852532 : {'P38326'},
852533 : {'P38151'},
852535 : {'P69850'},
852536 : {'P38328'},
852537 : {'P38329'},
852538 : {'P32783'},
852539 : {'P21372'},
852540 : {'P38330'},
852541 : {'P38140'},
852542 : {'P38141'},
852543 : {'P38142'},
852544 : {'P38331'},
852545 : {'P07286'},
852546 : {'P38143'},
852547 : {'P38144'},
852548 : {'P38332'},
852549 : {'P38333'},
852550 : {'P33734'},
852551 : {'P32449'},
852552 : {'P33757'},
852553 : {'P33759'},
852554 : {'P33317'},
852555 : {'P32570'},
852556 : {'P38334'},
852557 : {'P38335'},
852558 : {'Q3E776'},
852559 : {'P38145'},
852560 : {'P38336'},
852561 : {'P38337'},
852562 : {'P38338'},
852563 : {'P38339'},
852564 : {'P38340'},
852565 : {'P37292'},
852566 : {'P38341'},
852567 : {'P38146'},
852568 : {'P38342'},
852569 : {'P38344'},
852571 : {'P36532'},
852572 : {'P38345'},
852573 : {'P38346'},
852574 : {'P38347'},
852575 : {'P38348'},
852576 : {'P38349'},
852577 : {'P38147'},
852578 : {'P29539'},
852579 : {'P38148'},
852580 : {'P27344'},
852582 : {'P38351'},
852583 : {'P38352'},
852584 : {'P38149'},
852585 : {'P36526'},
852586 : {'P38353'},
852587 : {'P38150'},
852588 : {'P38354'},
852589 : {'P37302'},
852590 : {'P38355'},
852591 : {'P38153'},
852592 : {'P18480'},
852593 : {'P38356'},
852594 : {'P38152'},
852596 : {'P38358'},
852597 : {'P38359'},
852598 : {'P38360'},
852599 : {'P38361'},
852600 : {'P38157'},
852601 : {'P38156'},
852602 : {'P38158'},
852603 : {'P38155'},
852605 : {'P0CX12', 'P0CX13'},
852606 : {'P46655'},
852607 : {'P53064'},
852608 : {'P53065'},
852609 : {'P53066'},
852610 : {'P53067'},
852611 : {'P53068'},
852612 : {'P33307'},
852614 : {'P06774'},
852615 : {'P53071'},
852616 : {'P53070'},
852617 : {'P07244'},
852618 : {'P22224'},
852619 : {'P53072'},
852620 : {'P53073'},
852621 : {'P53074'},
852622 : {'P53036'},
852623 : {'P53075'},
852624 : {'P53076'},
852625 : {'Q92316'},
852626 : {'P53077'},
852628 : {'P53053'},
852629 : {'P53054'},
852630 : {'P0CE92', 'P0CE93', 'Q3E770'},
852631 : {'P53056'},
852632 : {'P53057'},
852633 : {'Q3E740'},
852634 : {'P53058'},
852635 : {'P53059'},
852636 : {'P10127'},
852637 : {'P32804'},
852638 : {'P32805'},
852639 : {'P04807'},
852640 : {'P32608'},
852641 : {'P51979'},
852642 : {'P53060'},
852643 : {'P53061'},
852644 : {'P22434'},
852645 : {'P53062'},
852646 : {'P53063'},
852647 : {'P40107'},
852648 : {'P53078'},
852649 : {'P53079'},
852650 : {'P53080'},
852651 : {'P53081'},
852652 : {'P53082'},
852654 : {'P53083'},
852655 : {'P53086'},
852657 : {'P35190'},
852659 : {'Q02793'},
852660 : {'P32912'},
852661 : {'P53088'},
852662 : {'P51996'},
852663 : {'P53035'},
852664 : {'P34164'},
852665 : {'P32558'},
852666 : {'P22137'},
852667 : {'P13711'},
852669 : {'P53089'},
852670 : {'P09620'},
852672 : {'P53090'},
852673 : {'P53091'},
852675 : {'P32803'},
852676 : {'P53093'},
852678 : {'P53094'},
852679 : {'P53095'},
852680 : {'P33892'},
852681 : {'P53096'},
852682 : {'P53097'},
852683 : {'P41833'},
852684 : {'P32799'},
852685 : {'Q00362'},
852686 : {'P39938'},
852688 : {'P04037'},
852689 : {'P53099'},
852690 : {'P53100'},
852691 : {'P53101'},
852692 : {'P53102'},
852694 : {'P40956'},
852695 : {'P53104'},
852696 : {'P43637'},
852697 : {'P39016'},
852699 : {'P46945'},
852700 : {'P46946'},
852701 : {'P46947'},
852702 : {'P22147'},
852703 : {'Q02199'},
852704 : {'P45818'},
852705 : {'P45819'},
852707 : {'P32579'},
852708 : {'P45820'},
852709 : {'P13586'},
852710 : {'P15315'},
852712 : {'P53107'},
852713 : {'P32863'},
852714 : {'P53032'},
852715 : {'P53108'},
852716 : {'P53109'},
852717 : {'P53110'},
852719 : {'P38622'},
852720 : {'P53111'},
852721 : {'P22855'},
852722 : {'P18898'},
852723 : {'P50113'},
852724 : {'P53112'},
852726 : {'P53114'},
852728 : {'P53115'},
852729 : {'P28777'},
852730 : {'P05738'},
852731 : {'P53117'},
852732 : {'P33891'},
852733 : {'P53118'},
852734 : {'P30775'},
852735 : {'P30777'},
852736 : {'P53119'},
852737 : {'P53120'},
852738 : {'P53121'},
852739 : {'P53122'},
852740 : {'P41811'},
852741 : {'P53123'},
852742 : {'P0CX43', 'P0CX44'},
852743 : {'P53124'},
852744 : {'P53125'},
852746 : {'P53127'},
852747 : {'Q01159'},
852748 : {'Q01163'},
852749 : {'P52868'},
852750 : {'P38633'},
852751 : {'P53012'},
852752 : {'P53128'},
852753 : {'P53129'},
852754 : {'P25443'},
852755 : {'P32505'},
852756 : {'P53130'},
852757 : {'P53131'},
852758 : {'P27697'},
852761 : {'P53133'},
852762 : {'P26309'},
852763 : {'P12904'},
852764 : {'P53134'},
852765 : {'P53135'},
852766 : {'P53040'},
852767 : {'P53136'},
852768 : {'P53137'},
852770 : {'P53139'},
852771 : {'P53140'},
852772 : {'P53141'},
852773 : {'P46672'},
852774 : {'P53142'},
852775 : {'P02406'},
852777 : {'P53144'},
852778 : {'P53011'},
852779 : {'P53145'},
852780 : {'P53146'},
852782 : {'P21827'},
852783 : {'P53147'},
852785 : {'P38932'},
852786 : {'P53010'},
852787 : {'P53148'},
852788 : {'P49687'},
852789 : {'P52920'},
852790 : {'P53150'},
852791 : {'P32435'},
852793 : {'P53152'},
852794 : {'P40957'},
852795 : {'P53153'},
852796 : {'P53154'},
852797 : {'P53009'},
852798 : {'P53155'},
852799 : {'P53156'},
852800 : {'P53157'},
852801 : {'P53158'},
852802 : {'P20447'},
852803 : {'P19807'},
852804 : {'P05737'},
852805 : {'P53159'},
852806 : {'P10961'},
852809 : {'P22149'},
852810 : {'P27999'},
852811 : {'P53163'},
852813 : {'P53164'},
852814 : {'P53165'},
852815 : {'P43636'},
852816 : {'P53166'},
852817 : {'P53167'},
852818 : {'P11154'},
852819 : {'P53168'},
852820 : {'P53169'},
852821 : {'P53170'},
852822 : {'P06104'},
852823 : {'P53171'},
852824 : {'P53172'},
852825 : {'P21147'},
852826 : {'P53173'},
852828 : {'P53174'},
852830 : {'P53176'},
852832 : {'P53177'},
852833 : {'P39936'},
852834 : {'Q01939'},
852835 : {'P53178'},
852837 : {'P53179'},
852838 : {'P25299'},
852839 : {'P07273'},
852842 : {'P05373'},
852844 : {'P53183'},
852845 : {'P31755'},
852846 : {'P53184'},
852847 : {'P53185'},
852848 : {'P27705'},
852850 : {'P53187'},
852851 : {'P32781'},
852852 : {'P04449'},
852853 : {'P14120'},
852854 : {'P53188'},
852856 : {'P53189'},
852857 : {'P53008'},
852858 : {'P00931'},
852860 : {'P40356'},
852861 : {'P53191'},
852862 : {'P39007'},
852863 : {'P43633'},
852864 : {'P53192'},
852865 : {'P43639'},
852866 : {'P53193'},
852867 : {'P16639'},
852868 : {'P32767'},
852869 : {'P33199'},
852870 : {'P25339'},
852871 : {'P12383'},
852872 : {'P25340'},
852873 : {'P21243'},
852874 : {'P25338'},
852875 : {'P07264'},
852876 : {'P05030'},
852878 : {'P38929'},
852879 : {'P53195'},
852880 : {'P53196'},
852881 : {'P53197'},
852882 : {'P53198'},
852883 : {'P53199'},
852884 : {'P53200'},
852885 : {'P53201'},
852886 : {'P53202'},
852887 : {'P53203'},
852888 : {'P41896'},
852889 : {'P33411'},
852890 : {'P33412'},
852891 : {'P16965'},
852892 : {'P40357'},
852893 : {'P53204'},
852895 : {'P53206'},
852896 : {'P53207'},
852897 : {'P32334'},
852898 : {'P53208'},
852899 : {'P53209'},
852900 : {'P53210'},
852901 : {'P53211'},
852902 : {'P17649'},
852903 : {'P39111'},
852904 : {'P53212'},
852905 : {'P53214'},
852908 : {'P53215'},
852910 : {'P53217'},
852911 : {'Q3E792'},
852912 : {'Q12085'},
852913 : {'Q12141'},
852915 : {'P28737'},
852916 : {'P27882'},
852918 : {'P53218'},
852919 : {'P53219'},
852920 : {'P40989'},
852921 : {'P53220'},
852922 : {'P53221'},
852923 : {'P53222'},
852924 : {'P53223'},
852925 : {'P31787'},
852926 : {'P53224'},
852928 : {'Q12269'},
852929 : {'Q12485'},
852931 : {'P14681'},
852932 : {'P53226'},
852933 : {'P53227'},
852934 : {'P53228'},
852935 : {'P32338'},
852937 : {'P53230'},
852938 : {'P33339'},
852939 : {'P53044'},
852940 : {'P32564'},
852941 : {'P53231'},
852943 : {'P53233'},
852944 : {'P53234'},
852945 : {'P53235'},
852946 : {'P50276'},
852947 : {'P53236'},
852948 : {'P53237'},
852949 : {'P53238'},
852950 : {'P41901'},
852951 : {'P53045'},
852952 : {'P38972'},
852953 : {'P53239'},
852955 : {'P32914'},
852956 : {'P53241'},
852957 : {'P53242'},
852958 : {'P53243'},
852960 : {'P53244'},
852961 : {'P53046'},
852962 : {'P53246'},
852963 : {'P48412'},
852964 : {'Q02260'},
852966 : {'Q00723'},
852967 : {'P23369'},
852968 : {'P53248'},
852969 : {'P48363'},
852970 : {'P53249'},
852971 : {'P53250'},
852972 : {'P53251'},
852973 : {'P35180'},
852974 : {'P12754'},
852975 : {'P12686'},
852976 : {'Q3E757'},
852977 : {'P53252'},
852978 : {'P26263'},
852979 : {'P06115'},
852980 : {'P53253'},
852982 : {'P53254'},
852983 : {'P49704'},
852984 : {'P22204'},
852985 : {'P53255'},
852986 : {'P07806'},
852987 : {'P53256'},
852988 : {'P53257'},
852989 : {'P48361'},
852990 : {'Q03018'},
852991 : {'P53038'},
852992 : {'P53258'},
852993 : {'P53259'},
852994 : {'P53260'},
852995 : {'P53261'},
852996 : {'P32585'},
852997 : {'P41806'},
852998 : {'P53262'},
853002 : {'P24868'},
853003 : {'P32943'},
853005 : {'Q12173'},
853006 : {'Q99315'},
853007 : {'P53264'},
853008 : {'P53265'},
853009 : {'P53266'},
853010 : {'P53267'},
853011 : {'P23615'},
853014 : {'P53270'},
853015 : {'P0CX29', 'P0CX30'},
853016 : {'P48837'},
853017 : {'P53271'},
853019 : {'P40260'},
853020 : {'P53272'},
853023 : {'P53043'},
853025 : {'P49090'},
853026 : {'P53273'},
853027 : {'P53274'},
853028 : {'P53275'},
853029 : {'P53276'},
853030 : {'P53277'},
853031 : {'P53278'},
853032 : {'P53279'},
853033 : {'P40961'},
853034 : {'P29340'},
853035 : {'P53280'},
853036 : {'P23638'},
853037 : {'P53281'},
853039 : {'P53283'},
853041 : {'P32504'},
853042 : {'P53285'},
853043 : {'P53286'},
853045 : {'P33336'},
853047 : {'P32318'},
853048 : {'P48234'},
853049 : {'P48235'},
853050 : {'P37293'},
853051 : {'P24000'},
853052 : {'P48236'},
853053 : {'P48237'},
853056 : {'P13856'},
853057 : {'P48238'},
853058 : {'P48239'},
853059 : {'P32582'},
853060 : {'P39927'},
853061 : {'P05374'},
853062 : {'P48240'},
853064 : {'P27476'},
853065 : {'P53289'},
853066 : {'P0CX61', 'P0CX62'},
853067 : {'P0CX63', 'P0CX64'},
853068 : {'Q12316'},
853069 : {'P0CX65', 'P0CX66', 'P0CX67', 'P0CX68', 'P0CX69'},
853071 : {'P39935'},
853072 : {'P53290'},
853075 : {'P53292'},
853076 : {'P32893'},
853077 : {'P17891'},
853078 : {'P53293'},
853079 : {'P53294'},
853080 : {'P53037'},
853081 : {'P22438'},
853082 : {'P53039'},
853083 : {'P53295'},
853085 : {'P37267'},
853086 : {'P32476'},
853088 : {'P53296'},
853089 : {'P53297'},
853090 : {'P53298'},
853091 : {'P49723'},
853093 : {'P53299'},
853095 : {'P22289'},
853096 : {'P19812'},
853097 : {'P36421'},
853098 : {'P41895'},
853099 : {'P48362'},
853100 : {'P41695'},
853102 : {'P53301'},
853104 : {'P06775'},
853106 : {'P00359'},
853107 : {'P16451'},
853108 : {'P42826'},
853109 : {'P46948'},
853110 : {'P46949'},
853111 : {'P46950'},
853112 : {'P46951'},
853113 : {'P42934'},
853114 : {'P42935'},
853115 : {'P42936'},
853116 : {'P13259'},
853117 : {'P42937'},
853118 : {'P07245'},
853119 : {'P42938'},
853120 : {'P42939'},
853121 : {'P42940'},
853122 : {'P42941'},
853123 : {'P22803'},
853124 : {'P42942'},
853125 : {'P53303'},
853126 : {'P53304'},
853127 : {'P53047'},
853128 : {'P32905'},
853129 : {'P53305'},
853130 : {'P53306'},
853131 : {'P50077'},
853133 : {'P30822'},
853135 : {'P31334'},
853136 : {'P50078'},
853137 : {'P10834'},
853138 : {'P50079'},
853139 : {'P50080'},
853140 : {'P50082'},
853142 : {'P50076'},
853144 : {'P32566'},
853145 : {'P50084'},
853146 : {'P50085'},
853147 : {'P50086'},
853148 : {'P17442'},
853149 : {'P39676'},
853150 : {'P50087'},
853151 : {'P50088'},
853152 : {'P50089'},
853153 : {'P50090'},
853154 : {'P50091'},
853155 : {'P16861'},
853157 : {'P53309'},
853158 : {'P53311'},
853159 : {'P53312'},
853160 : {'P53313'},
853161 : {'P29056'},
853162 : {'P53314'},
853163 : {'P53315'},
853164 : {'P53050'},
853165 : {'P53316'},
853166 : {'P53317'},
853167 : {'Q03330'},
853168 : {'P32379'},
853169 : {'P00924'},
853170 : {'P53318'},
853172 : {'P53319'},
853173 : {'P53320'},
853174 : {'P07276'},
853175 : {'P53322'},
853177 : {'P46682'},
853178 : {'P53323'},
853179 : {'P53324'},
853181 : {'P00958'},
853182 : {'P53326'},
853183 : {'P51601'},
853185 : {'P40325'},
853186 : {'P40340'},
853187 : {'P53327'},
853188 : {'Q3E705'},
853190 : {'P53329'},
853191 : {'P46677'},
853192 : {'P53330'},
853193 : {'P53331'},
853194 : {'P53332'},
853195 : {'P53333'},
853196 : {'P53334'},
853197 : {'P53335'},
853198 : {'P53049'},
853199 : {'P15703'},
853200 : {'P53336'},
853201 : {'P53337'},
853202 : {'P32527'},
853203 : {'P32451'},
853204 : {'P53051'},
853205 : {'P53338'},
853207 : {'P53048'},
853209 : {'P53341'},
853210 : {'P53343'},
853212 : {'P53344'},
853213 : {'P0CX14', 'P0CX15'},
853214 : {'P40884'},
853216 : {'P40886'},
853217 : {'P40896'},
853218 : {'P40897'},
853219 : {'P32800'},
853221 : {'P07252'},
853222 : {'P08466'},
853223 : {'P39526'},
853224 : {'P39529'},
853225 : {'Q02820'},
853226 : {'P39531'},
853227 : {'P32524'},
853229 : {'P32525'},
853230 : {'P39533'},
853231 : {'P40889'},
853232 : {'P0CE88', 'P0CE89'},
853233 : {'P40890'},
853235 : {'P0CW40', 'P0CW41'},
853236 : {'P40885'},
853237 : {'P40892'},
853238 : {'P40893'},
853241 : {'P39535'},
853242 : {'P39538'},
853243 : {'P39540'},
853244 : {'P09119'},
853246 : {'P39542'},
853247 : {'P39543'},
853248 : {'P39516'},
853249 : {'P0C0W1'},
853250 : {'P04650'},
853252 : {'P32944'},
853253 : {'P46982'},
853254 : {'P46983'},
853255 : {'P46984'},
853256 : {'P46985'},
853257 : {'P46987'},
853259 : {'P22135'},
853260 : {'P46988'},
853261 : {'P46989'},
853262 : {'P46990'},
853264 : {'P32591'},
853265 : {'P39005'},
853266 : {'P26755'},
853267 : {'P27614'},
853268 : {'P46992'},
853269 : {'P46993'},
853271 : {'P46995'},
853272 : {'P08524'},
853273 : {'P08525'},
853274 : {'P38970'},
853275 : {'P06244'},
853276 : {'P46996'},
853277 : {'P46997'},
853279 : {'P46998'},
853280 : {'P46999'},
853281 : {'P32478'},
853282 : {'P47001'},
853283 : {'P21268'},
853285 : {'P47002'},
853286 : {'P32604'},
853287 : {'P34110'},
853288 : {'P11986'},
853290 : {'P14359'},
853292 : {'P47005'},
853293 : {'P47006'},
853294 : {'P47007'},
853295 : {'P46958'},
853296 : {'P47008'},
853297 : {'P47009'},
853298 : {'P39515'},
853300 : {'P14680'},
853301 : {'P20433'},
853302 : {'P26725'},
853303 : {'P10081'},
853304 : {'P47011'},
853305 : {'Q3E754'},
853307 : {'P47013'},
853308 : {'P10566'},
853309 : {'P47014'},
853310 : {'P47015'},
853311 : {'P07259'},
853312 : {'P12685'},
853313 : {'P08018'},
853315 : {'P35208'},
853316 : {'P47016'},
853317 : {'P46959'},
853318 : {'P47017'},
853319 : {'P47018'},
853320 : {'P47019'},
853322 : {'P46969'},
853323 : {'P47022'},
853325 : {'P46956'},
853326 : {'P46955'},
853327 : {'P32447'},
853329 : {'P47023'},
853330 : {'P47024'},
853332 : {'P47025'},
853333 : {'P42943'},
853334 : {'P42944'},
853335 : {'P42945'},
853336 : {'P42946'},
853337 : {'P42947'},
853338 : {'P32581'},
853339 : {'P42948'},
853340 : {'P42949'},
853341 : {'P42950'},
853342 : {'P39677'},
853344 : {'P32477'},
853345 : {'P42951'},
853346 : {'P40955'},
853347 : {'P40856'},
853348 : {'P40857'},
853349 : {'P40858'},
853350 : {'Q01389'},
853351 : {'P40309'},
853352 : {'P40310'},
853353 : {'P12954'},
853354 : {'P47026'},
853355 : {'P47027'},
853356 : {'P46954'},
853357 : {'P05150'},
853358 : {'P09880'},
853359 : {'P19658'},
853361 : {'P47029'},
853362 : {'P47030'},
853363 : {'P47031'},
853364 : {'P80428'},
853365 : {'P06105'},
853366 : {'P47032'},
853367 : {'P47033'},
853368 : {'P47034'},
853369 : {'P47035'},
853371 : {'P47037'},
853372 : {'P40358'},
853373 : {'P40359'},
853374 : {'P40360'},
853375 : {'P40361'},
853376 : {'P40362'},
853377 : {'P40363'},
853379 : {'P40364'},
853381 : {'P40366'},
853382 : {'P22353'},
853383 : {'Q3E7B2'},
853384 : {'P40367'},
853385 : {'P40368'},
853386 : {'P47039'},
853387 : {'P47040'},
853388 : {'P47041'},
853389 : {'P47042'},
853390 : {'P47043'},
853391 : {'P47044'},
853392 : {'P47045'},
853393 : {'P40335'},
853394 : {'Q3E837'},
853395 : {'P00360'},
853396 : {'P47046'},
853397 : {'P47047'},
853398 : {'P47048'},
853399 : {'P47049'},
853400 : {'P47050'},
853401 : {'P47051'},
853405 : {'P47052'},
853406 : {'P32806'},
853407 : {'P47053'},
853408 : {'P43638'},
853409 : {'P14907'},
853410 : {'P47054'},
853413 : {'P47055'},
853414 : {'P47056'},
853416 : {'P47057'},
853417 : {'P47058'},
853418 : {'P16474'},
853419 : {'P20448'},
853421 : {'Q00618'},
853422 : {'P40958'},
853423 : {'P47061'},
853425 : {'P47062'},
853426 : {'P47063'},
853427 : {'P09938'},
853428 : {'P40992'},
853429 : {'P47064'},
853432 : {'P47065'},
853433 : {'P47068'},
853434 : {'P47069'},
853436 : {'P47072'},
853438 : {'P39077'},
853439 : {'P47074'},
853441 : {'P47075'},
853442 : {'P47076'},
853445 : {'P47077'},
853447 : {'P47079'},
853450 : {'P46962'},
853452 : {'P08678'},
853453 : {'P41544'},
853454 : {'P47081'},
853455 : {'P41543'},
853456 : {'P38624'},
853457 : {'P47082'},
853458 : {'P47083'},
853459 : {'P47084'},
853460 : {'P20840'},
853461 : {'P27351'},
853462 : {'P46957'},
853463 : {'P20459'},
853464 : {'P47085'},
853465 : {'P00358'},
853466 : {'P08536'},
853467 : {'P46965'},
853468 : {'P47086'},
853469 : {'P47087'},
853470 : {'P47088'},
853471 : {'P47089'},
853472 : {'P47090'},
853473 : {'P39522'},
853475 : {'P22696'},
853477 : {'P41903'},
853478 : {'P21651'},
853479 : {'P47093'},
853481 : {'P47095'},
853482 : {'P47096'},
853483 : {'P0CX74', 'P0CX75', 'P0CX76'},
853484 : {'P47098'},
853485 : {'P47099'},
853486 : {'P47100'},
853487 : {'P47101'},
853488 : {'P47102'},
853489 : {'P47103'},
853490 : {'P47104'},
853491 : {'Q02772'},
853492 : {'P40352'},
853494 : {'P40985'},
853495 : {'P47107'},
853497 : {'P37020'},
853498 : {'P47108'},
853499 : {'P46673'},
853500 : {'P47110'},
853502 : {'P47111'},
853503 : {'P0CS90'},
853504 : {'P47112'},
853506 : {'P19211'},
853507 : {'P00044'},
853508 : {'P21373'},
853509 : {'P21374'},
853510 : {'P21375'},
853512 : {'P06779'},
853513 : {'P47113'},
853514 : {'P47114'},
853516 : {'P46973'},
853518 : {'P47115'},
853520 : {'P00572'},
853521 : {'Q00381'},
853522 : {'P47116'},
853523 : {'P17106'},
853524 : {'P40355'},
853525 : {'P40354'},
853526 : {'P32529'},
853527 : {'P40413'},
853528 : {'P47117'},
853529 : {'P35169'},
853530 : {'P47118'},
853531 : {'P40348'},
853532 : {'P47119'},
853534 : {'P47120'},
853535 : {'P47122'},
853536 : {'P05375'},
853537 : {'P47123'},
853538 : {'P47124'},
853539 : {'P32458'},
853540 : {'P23641'},
853541 : {'P47125'},
853542 : {'P47126'},
853543 : {'P47127'},
853544 : {'P47128'},
853545 : {'P47129'},
853546 : {'P47130'},
853547 : {'P47131'},
853548 : {'P18852'},
853550 : {'P47133'},
853551 : {'P47134'},
853552 : {'P24814'},
853553 : {'P47135'},
853554 : {'P47136'},
853555 : {'P45976'},
853556 : {'P21190'},
853557 : {'P0CX25', 'P0CX26'},
853558 : {'P33303'},
853559 : {'P47137'},
853560 : {'P47138'},
853561 : {'P47139'},
853562 : {'P35127'},
853563 : {'P47140'},
853565 : {'P47141'},
853566 : {'P47142'},
853567 : {'P38627'},
853568 : {'P00445'},
853569 : {'P47143'},
853570 : {'P47144'},
853571 : {'P47145'},
853572 : {'P47146'},
853573 : {'P03965'},
853574 : {'P47147'},
853575 : {'P47148'},
853576 : {'P47149'},
853578 : {'P47150'},
853579 : {'P47152'},
853580 : {'P47153'},
853581 : {'P47154'},
853582 : {'P47155'},
853583 : {'P47156'},
853584 : {'P47157'},
853585 : {'P00830'},
853586 : {'P47158'},
853587 : {'P26783'},
853588 : {'P47159'},
853589 : {'P47160'},
853590 : {'P47161'},
853592 : {'P46974'},
853593 : {'P47163'},
853594 : {'P47164'},
853595 : {'P32906'},
853596 : {'P46970'},
853597 : {'P47165'},
853598 : {'P47166'},
853599 : {'P47167'},
853600 : {'P57744'},
853601 : {'P47168'},
853602 : {'P47169'},
853603 : {'P47170'},
853604 : {'P31116'},
853605 : {'P47171'},
853606 : {'P47172'},
853607 : {'P47173'},
853608 : {'P46971'},
853609 : {'P32787'},
853610 : {'P0CX35', 'P0CX36'},
853611 : {'P47174'},
853612 : {'P47175'},
853613 : {'P47176'},
853614 : {'P47177'},
853615 : {'P47178'},
853616 : {'P47179'},
853617 : {'P15365'},
853618 : {'P47180'},
853619 : {'P47181'},
853620 : {'P47182'},
853621 : {'P47183'},
853623 : {'P47185'},
853624 : {'P35497'},
853625 : {'P0CE00'},
853626 : {'P47187'},
853628 : {'P36039'},
853629 : {'P36040'},
853630 : {'P33418'},
853631 : {'P36041'},
853632 : {'P32600'},
853634 : {'P36044'},
853635 : {'P36002'},
853636 : {'P24004'},
853638 : {'P36015'},
853639 : {'P36046'},
853640 : {'P07236'},
853641 : {'P36047'},
853642 : {'P32463'},
853643 : {'P32461'},
853644 : {'P25296'},
853646 : {'P32464'},
853647 : {'P34230'},
853648 : {'P34231'},
853649 : {'P34232'},
853650 : {'P34233'},
853651 : {'P08432'},
853652 : {'P34234'},
853653 : {'P07149'},
853654 : {'P32895'},
853656 : {'P35994'},
853658 : {'P35995'},
853659 : {'P36032'},
853660 : {'P36033'},
853661 : {'P36034'},
853662 : {'P36007'},
853663 : {'P36035'},
853664 : {'P28272'},
853665 : {'P28273'},
853666 : {'P36036'},
853667 : {'P36037'},
853668 : {'P32368'},
853669 : {'P00937'},
853670 : {'P22515'},
853671 : {'P12866'},
853673 : {'P36038'},
853674 : {'P05740'},
853675 : {'P34237'},
853677 : {'P06783'},
853678 : {'P34239'},
853679 : {'P34240'},
853680 : {'P36029'},
853681 : {'P36048'},
853682 : {'P36049'},
853683 : {'P36003'},
853684 : {'P35996'},
853686 : {'P36004'},
853687 : {'P32388'},
853688 : {'P05986'},
853690 : {'P36051'},
853692 : {'Q03178'},
853693 : {'Q03180'},
853695 : {'P36052'},
853696 : {'P36005'},
853697 : {'P36053'},
853698 : {'P36054'},
853699 : {'P32454'},
853700 : {'P35997'},
853701 : {'P36056'},
853702 : {'P36057'},
853705 : {'P00950'},
853706 : {'P36059'},
853707 : {'P36060'},
853708 : {'P24309'},
853709 : {'Q00711'},
853710 : {'P36062'},
853712 : {'P33299'},
853713 : {'P35718'},
853714 : {'P34078'},
853715 : {'P35719'},
853716 : {'P33421'},
853717 : {'P34163'},
853718 : {'Q03957'},
853719 : {'P69852'},
853720 : {'P14063'},
853721 : {'P36064'},
853723 : {'P36000'},
853724 : {'P35999'},
853725 : {'P36066'},
853727 : {'P36001'},
853728 : {'P36068'},
853729 : {'P36006'},
853730 : {'P36069'},
853732 : {'P33401'},
853733 : {'P12688'},
853734 : {'P36070'},
853735 : {'P32343'},
853737 : {'P32342'},
853738 : {'P32330'},
853739 : {'P32332'},
853741 : {'P32341'},
853743 : {'P28707'},
853744 : {'P28708'},
853746 : {'P22936'},
853747 : {'P26793'},
853748 : {'P14164'},
853750 : {'P34253'},
853751 : {'P14064'},
853752 : {'P34252'},
853753 : {'P34251'},
853754 : {'Q3E7A0'},
853755 : {'Q01802'},
853756 : {'P34250'},
853757 : {'P14742'},
853758 : {'P14904'},
853760 : {'P34244'},
853761 : {'P34248'},
853762 : {'P34247'},
853763 : {'P34246'},
853765 : {'P43497'},
853766 : {'P28319'},
853767 : {'P28320'},
853768 : {'P28321'},
853769 : {'P23493'},
853770 : {'P33314'},
853771 : {'P33324'},
853772 : {'P36075'},
853773 : {'P35201'},
853774 : {'P36076'},
853775 : {'Q00873'},
853776 : {'P36077'},
853777 : {'P17505'},
853778 : {'P36078'},
853780 : {'P36080'},
853781 : {'P36008'},
853782 : {'P31412'},
853783 : {'P32364'},
853784 : {'P36009'},
853785 : {'P36081'},
853787 : {'P36083'},
853788 : {'P36084'},
853789 : {'P36016'},
853790 : {'P36085'},
853792 : {'P36086'},
853793 : {'P36087'},
853794 : {'P36088'},
853796 : {'Q02629'},
853798 : {'P36010'},
853800 : {'P35723'},
853801 : {'P35724'},
853802 : {'P35725'},
853803 : {'P33749'},
853804 : {'P35727'},
853805 : {'P14540'},
853806 : {'P35728'},
853807 : {'P32774'},
853808 : {'P35729'},
853809 : {'P35691'},
853810 : {'P35731'},
853811 : {'P35732'},
853812 : {'O60200'},
853814 : {'P35734'},
853815 : {'P35735'},
853816 : {'P35736'},
853817 : {'P36012'},
853818 : {'P32801'},
853819 : {'P36090'},
853820 : {'P36091'},
853821 : {'P20457'},
853822 : {'P36092'},
853823 : {'P36093'},
853824 : {'P36094'},
853825 : {'P36095'},
853826 : {'P32860'},
853827 : {'P32857'},
853828 : {'P32862'},
853829 : {'P32858'},
853830 : {'P32861'},
853832 : {'P36096'},
853833 : {'Q86ZR7'},
853834 : {'P36097'},
853836 : {'P33417'},
853839 : {'P36013'},
853840 : {'P36100'},
853841 : {'P36101'},
853842 : {'P36014'},
853843 : {'P36102'},
853844 : {'P15700'},
853845 : {'P36103'},
853846 : {'P09798'},
853847 : {'P20484'},
853848 : {'P35210'},
853849 : {'P29703'},
853850 : {'Q3E7A7'},
853851 : {'P36104'},
853852 : {'P34243'},
853853 : {'P30902'},
853854 : {'P25502'},
853855 : {'P34241'},
853856 : {'P33204'},
853857 : {'P33203'},
853858 : {'Q03702'},
853859 : {'P33202'},
853860 : {'P33201'},
853861 : {'P28496'},
853862 : {'P28495'},
853863 : {'P43682'},
853864 : {'P36105'},
853865 : {'P36106'},
853866 : {'P36107'},
853867 : {'P28778'},
853868 : {'P36108'},
853869 : {'Q02196'},
853870 : {'P21576'},
853871 : {'P29468'},
853872 : {'Q02201'},
853873 : {'Q02202'},
853874 : {'Q02203'},
853875 : {'Q02204'},
853876 : {'Q02205'},
853877 : {'Q02206'},
853878 : {'Q02207'},
853880 : {'Q02208'},
853881 : {'Q02209'},
853882 : {'P36110'},
853884 : {'P36018'},
853885 : {'P36111'},
853886 : {'P36112'},
853887 : {'P36113'},
853888 : {'P36114'},
853889 : {'P36115'},
853890 : {'P36116'},
853891 : {'P36117'},
853892 : {'P36118'},
853893 : {'P36119'},
853894 : {'P36120'},
853895 : {'P36121'},
853896 : {'P14741'},
853898 : {'P36122'},
853899 : {'P36123'},
853900 : {'P36124'},
853901 : {'P36125'},
853902 : {'P36126'},
853904 : {'P26343'},
853906 : {'P69771'},
853908 : {'P36130'},
853909 : {'P36131'},
853910 : {'P36132'},
853912 : {'P19145'},
853914 : {'P36134'},
853916 : {'P36135'},
853917 : {'P36136'},
853918 : {'P36137'},
853919 : {'P36138'},
853920 : {'P36139'},
853922 : {'P25293'},
853923 : {'P36141'},
853924 : {'P28584'},
853925 : {'P36142'},
853926 : {'P23500'},
853927 : {'P23501'},
853928 : {'P36022'},
853929 : {'Q00246'},
853930 : {'P33753'},
853931 : {'P0C0V8'},
853932 : {'P36143'},
853933 : {'P10081'},
853934 : {'P36144'},
853935 : {'P33550'},
853936 : {'P36145'},
853937 : {'P36146'},
853938 : {'P36023'},
853939 : {'P36147'},
853940 : {'P00431'},
853941 : {'P36148'},
853942 : {'P36149'},
853943 : {'P36150'},
853944 : {'P36151'},
853945 : {'P36152'},
853946 : {'P36024'},
853949 : {'P36154'},
853950 : {'P36155'},
853951 : {'P36156'},
853952 : {'P36157'},
853953 : {'P36158'},
853954 : {'P36159'},
853955 : {'Q02046'},
853956 : {'P36160'},
853957 : {'P36161'},
853958 : {'P36162'},
853959 : {'P32769'},
853960 : {'P22354'},
853961 : {'P15938'},
853962 : {'P36163'},
853963 : {'P36164'},
853964 : {'P36165'},
853965 : {'P36166'},
853966 : {'P36167'},
853967 : {'P32583'},
853968 : {'P32901'},
853969 : {'P0CH08', 'P0CH09'},
853970 : {'Q02455'},
853971 : {'P36168'},
853972 : {'P10963'},
853973 : {'P36026'},
853974 : {'P22035'},
853975 : {'P36169'},
853976 : {'P21691'},
853977 : {'P36170'},
853978 : {'P0CE68'},
853979 : {'P0CE69'},
853980 : {'P36172'},
853981 : {'P36173'},
853982 : {'Q08281'},
853983 : {'Q08280'},
853984 : {'Q12471'},
853985 : {'Q08278'},
853986 : {'Q08273'},
853988 : {'Q08271'},
853989 : {'Q08270'},
853990 : {'Q08269'},
853991 : {'Q12016'},
853992 : {'Q12222'},
853993 : {'P04456'},
853994 : {'P22133'},
853995 : {'Q12383'},
853996 : {'Q12463'},
853997 : {'Q99383'},
853999 : {'Q08361'},
854000 : {'Q08347'},
854001 : {'P0CF20'},
854002 : {'P0CF19'},
854003 : {'Q08322'},
854005 : {'Q3E769'},
854006 : {'Q08300'},
854007 : {'Q08299'},
854008 : {'Q08295'},
854009 : {'P54862'},
854010 : {'Q05164'},
854011 : {'Q12512'},
854013 : {'Q12333'},
854014 : {'Q12068'},
854016 : {'Q12517'},
854017 : {'P50875'},
854018 : {'Q12462'},
854019 : {'Q12146'},
854020 : {'P89105'},
854021 : {'Q08287'},
854022 : {'P50861'},
854023 : {'Q08285'},
854024 : {'Q08282'},
854025 : {'P18544'},
854026 : {'P07260'},
854027 : {'P38925'},
854028 : {'P07280'},
854029 : {'P0CX49', 'P0CX50'},
854030 : {'Q08268'},
854032 : {'Q12348'},
854033 : {'P22148'},
854034 : {'P53632'},
854035 : {'Q12322'},
854036 : {'Q12469'},
854037 : {'Q12317'},
854038 : {'Q12285'},
854039 : {'P41912'},
854040 : {'Q08245'},
854042 : {'P13902'},
854043 : {'Q12239'},
854046 : {'Q12215'},
854047 : {'Q12366'},
854048 : {'Q92392'},
854049 : {'Q12273'},
854050 : {'P30606'},
854051 : {'Q12272'},
854052 : {'Q99393'},
854053 : {'Q12236'},
854055 : {'Q12496'},
854056 : {'Q12109'},
854057 : {'P27680'},
854058 : {'Q12039'},
854059 : {'P40339'},
854060 : {'Q12400'},
854061 : {'Q12010'},
854062 : {'Q12411'},
854063 : {'P25847'},
854064 : {'Q12180'},
854065 : {'Q99316'},
854066 : {'Q99247'},
854067 : {'Q3E835'},
854068 : {'P00330'},
854070 : {'Q12252'},
854071 : {'Q12292'},
854072 : {'P35193'},
854073 : {'P19158'},
854075 : {'Q08237'},
854076 : {'Q08236'},
854077 : {'P81451'},
854078 : {'Q08235'},
854079 : {'Q12387'},
854080 : {'Q08234'},
854081 : {'Q08232'},
854082 : {'Q08231'},
854083 : {'Q08230'},
854084 : {'Q08229'},
854085 : {'P33895'},
854086 : {'P53685'},
854087 : {'P32607'},
854088 : {'Q12362'},
854089 : {'Q08227'},
854090 : {'P32179'},
854091 : {'Q08226'},
854092 : {'Q99186'},
854093 : {'Q12265'},
854094 : {'Q12296'},
854095 : {'P41911'},
854096 : {'P22768'},
854097 : {'Q08225'},
854098 : {'Q12326'},
854099 : {'Q08224'},
854102 : {'Q12161'},
854103 : {'Q08223'},
854104 : {'P89113'},
854105 : {'P21182'},
854106 : {'P19659'},
854108 : {'Q08220'},
854109 : {'Q08219'},
854110 : {'Q08218'},
854111 : {'Q08217'},
854113 : {'Q08215'},
854114 : {'Q08214'},
854115 : {'Q08213'},
854116 : {'Q08208'},
854117 : {'Q01855'},
854118 : {'P05319'},
854119 : {'P40303'},
854120 : {'Q08206'},
854123 : {'Q08204'},
854124 : {'P48525'},
854125 : {'Q08202'},
854126 : {'Q08199'},
854127 : {'Q08193'},
854128 : {'Q08187'},
854129 : {'Q08182'},
854130 : {'Q08179'},
854131 : {'Q08176'},
854133 : {'Q92325'},
854134 : {'Q08172'},
854135 : {'P25038'},
854136 : {'P25040'},
854138 : {'Q08162'},
854139 : {'P38967'},
854141 : {'Q08157'},
854142 : {'Q08144'},
854143 : {'Q08119'},
854144 : {'P22517'},
854145 : {'Q08118'},
854146 : {'Q08110'},
854147 : {'P0C272'},
854149 : {'Q08109'},
854150 : {'Q12692'},
854151 : {'Q08108'},
854152 : {'Q08096'},
854153 : {'Q92328'},
854154 : {'Q08058'},
854155 : {'Q08054'},
854156 : {'P04786'},
854157 : {'P38902'},
854158 : {'P22579'},
854159 : {'Q12006'},
854160 : {'Q12442'},
854161 : {'P20052'},
854162 : {'Q12149'},
854163 : {'Q12001'},
854164 : {'P25036'},
854165 : {'Q12339'},
854166 : {'Q08387'},
854167 : {'Q12094'},
854168 : {'Q12118'},
854170 : {'P54867'},
854171 : {'Q3E7B9'},
854173 : {'Q12218'},
854174 : {'P33890'},
854175 : {'Q08409'},
854177 : {'Q12351'},
854179 : {'P38903'},
854181 : {'Q12450'},
854182 : {'P32606'},
854183 : {'Q02805'},
854184 : {'Q99248'},
854185 : {'P38910'},
854186 : {'Q12314'},
854187 : {'Q12204'},
854188 : {'Q12433'},
854190 : {'P53687'},
854191 : {'P26449'},
854192 : {'P15705'},
854193 : {'P40917'},
854195 : {'Q99234'},
854197 : {'Q12398'},
854198 : {'P39875'},
854199 : {'Q12013'},
854200 : {'P51534'},
854201 : {'P32854'},
854202 : {'P38909'},
854203 : {'P32480'},
854204 : {'P38930'},
854205 : {'Q12320'},
854206 : {'Q08412'},
854208 : {'P12611'},
854209 : {'Q08416'},
854210 : {'P33448'},
854211 : {'P20449'},
854212 : {'Q02794'},
854213 : {'Q02792'},
854214 : {'Q08417'},
854216 : {'Q08421'},
854217 : {'Q08422'},
854220 : {'Q08438'},
854221 : {'Q08444'},
854222 : {'Q08446'},
854223 : {'P50275'},
854225 : {'Q08448'},
854226 : {'Q08457'},
854227 : {'P19454'},
854228 : {'P36025'},
854229 : {'P14126'},
854230 : {'Q08465'},
854231 : {'P07143'},
854232 : {'Q08471'},
854233 : {'P40351'},
854234 : {'Q08474'},
854235 : {'Q92331'},
854236 : {'Q08484'},
854237 : {'Q08485'},
854240 : {'Q08490'},
854241 : {'P06785'},
854242 : {'P41834'},
854243 : {'Q08491'},
854244 : {'P40962'},
854245 : {'Q08492'},
854246 : {'Q12067'},
854247 : {'Q08496'},
854248 : {'Q12043'},
854249 : {'Q12416'},
854251 : {'Q12405'},
854252 : {'P48439'},
854253 : {'Q12466'},
854255 : {'Q12324'},
854256 : {'P36017'},
854257 : {'Q12511'},
854258 : {'Q12000'},
854259 : {'Q99252'},
854260 : {'Q12275'},
854261 : {'P40994'},
854262 : {'Q12189'},
854263 : {'P26786'},
854264 : {'Q12274'},
854265 : {'P20676'},
854266 : {'P27810'},
854267 : {'Q12289'},
854268 : {'P01119'},
854270 : {'P46964'},
854271 : {'Q12057'},
854272 : {'Q08504'},
854273 : {'Q12241'},
854274 : {'Q99188'},
854275 : {'Q12166'},
854276 : {'Q12271'},
854277 : {'Q12415'},
854278 : {'Q99210'},
854279 : {'Q12453'},
854280 : {'P41696'},
854281 : {'Q12219'},
854282 : {'Q99394'},
854283 : {'P04051'},
854284 : {'P33297'},
854285 : {'Q12108'},
854286 : {'Q12196'},
854287 : {'P14065'},
854289 : {'P07274'},
854290 : {'P38439'},
854291 : {'Q01476'},
854292 : {'P41735'},
854293 : {'P41734'},
854294 : {'P39083'},
854295 : {'P21264'},
854296 : {'Q99222'},
854297 : {'Q12375'},
854299 : {'Q12486'},
854300 : {'P32913'},
854301 : {'P32324'},
854302 : {'Q12128'},
854303 : {'P28241'},
854305 : {'Q12212'},
854306 : {'Q12242'},
854307 : {'P20134'},
854309 : {'Q12386'},
854310 : {'P53598'},
854312 : {'P0CX59', 'P0CX60'},
854313 : {'Q92393'},
854314 : {'P35202'},
854315 : {'Q12050'},
854317 : {'Q99216'},
854318 : {'Q12171'},
854319 : {'Q02521'},
854320 : {'Q04174'},
854321 : {'Q12487'},
854322 : {'P08518'},
854323 : {'Q99325'},
854324 : {'P33302'},
854325 : {'Q12232'},
854326 : {'Q99312'},
854327 : {'Q12216'},
854328 : {'P25043'},
854329 : {'P17558'},
854330 : {'Q12330'},
854331 : {'Q99189'},
854332 : {'Q12412'},
854333 : {'Q12172'},
854334 : {'Q99321'},
854335 : {'Q12125'},
854336 : {'Q99287'},
854337 : {'Q12104'},
854338 : {'Q3E7X9'},
854339 : {'P13188'},
854342 : {'Q12246'},
854343 : {'Q12340'},
854344 : {'Q12123'},
854345 : {'Q12343'},
854346 : {'Q08548'},
854347 : {'P16622'},
854349 : {'Q08550'},
854350 : {'P28006'},
854351 : {'Q08553'},
854352 : {'Q08558'},
854353 : {'Q12446'},
854354 : {'P0CX33', 'P0CX34'},
854355 : {'Q08559'},
854356 : {'P33330'},
854357 : {'P32836'},
854358 : {'Q08560'},
854359 : {'P02992'},
854360 : {'P21339'},
854361 : {'Q08561'},
854362 : {'P32603'},
854363 : {'Q08562'},
854364 : {'Q08579'},
854365 : {'Q12113'},
854366 : {'Q12439'},
854368 : {'Q08580'},
854369 : {'P32773'},
854370 : {'Q08581'},
854371 : {'P32875'},
854372 : {'Q08601'},
854373 : {'P38934'},
854376 : {'P25270'},
854377 : {'P06633'},
854379 : {'P06634'},
854380 : {'Q08622'},
854381 : {'P39744'},
854382 : {'P22276'},
854383 : {'P29461'},
854384 : {'P39683'},
854385 : {'P22139'},
854386 : {'P32266'},
854387 : {'P18851'},
854388 : {'Q99314'},
854389 : {'Q12282'},
854390 : {'Q12032'},
854391 : {'Q12234'},
854392 : {'P38630'},
854394 : {'P33894'},
854395 : {'Q12044'},
854396 : {'Q12283'},
854397 : {'Q99297'},
854398 : {'Q12015'},
854399 : {'P20436'},
854401 : {'Q12056'},
854402 : {'Q12276'},
854403 : {'Q12106'},
854404 : {'Q12206'},
854405 : {'Q12363'},
854406 : {'P32490'},
854407 : {'P38523'},
854408 : {'Q01919'},
854409 : {'P41056'},
854411 : {'P07807'},
854412 : {'P35843'},
854413 : {'Q08634'},
854414 : {'Q08641'},
854415 : {'Q08645'},
854416 : {'Q08646'},
854417 : {'Q08647'},
854418 : {'Q08649'},
854419 : {'Q08650'},
854420 : {'Q08651'},
854421 : {'Q08673'},
854423 : {'Q08683'},
854424 : {'Q08685'},
854425 : {'Q08686'},
854426 : {'Q08687'},
854427 : {'Q08689'},
854428 : {'P14906'},
854429 : {'Q08692'},
854430 : {'Q08693'},
854431 : {'P06704'},
854432 : {'Q08702'},
854433 : {'P53549'},
854434 : {'P09032'},
854435 : {'Q08723'},
854436 : {'Q08726'},
854437 : {'Q08729'},
854439 : {'P48606'},
854440 : {'P38969'},
854441 : {'Q08732'},
854442 : {'Q08734'},
854443 : {'P39946'},
854444 : {'P32563'},
854445 : {'Q12029'},
854446 : {'Q12024'},
854447 : {'Q12256'},
854448 : {'P07884'},
854449 : {'Q12033'},
854450 : {'P12962'},
854452 : {'P06174'},
854453 : {'Q12192'},
854454 : {'Q99369'},
854456 : {'Q12017'},
854457 : {'Q12040'},
854458 : {'Q12134'},
854459 : {'Q12305'},
854460 : {'Q08742'},
854461 : {'Q12481'},
854462 : {'Q12404'},
854463 : {'Q12012'},
854465 : {'P22082'},
854466 : {'Q12697'},
854467 : {'Q08743'},
854468 : {'Q08745'},
854469 : {'Q08746'},
854470 : {'Q08747'},
854471 : {'Q08748'},
854472 : {'Q08749'},
854473 : {'Q08750'},
854474 : {'O14467'},
854475 : {'Q08754'},
854477 : {'Q08760'},
854478 : {'P08521'},
854479 : {'P07258'},
854480 : {'Q08773'},
854481 : {'O14468'},
854482 : {'Q08774'},
854483 : {'Q08777'},
854484 : {'P22215'},
854485 : {'Q12420'},
854487 : {'Q12499'},
854488 : {'Q12382'},
854489 : {'P0CX23', 'P0CX24'},
854490 : {'P09937'},
854493 : {'Q12507'},
854494 : {'P32798'},
854495 : {'P30624'},
854497 : {'Q99181'},
854498 : {'Q12096'},
854499 : {'P47190'},
854500 : {'Q12502'},
854501 : {'P54885'},
854503 : {'Q99332'},
854504 : {'P19524'},
854505 : {'P33328'},
854506 : {'P51533'},
854507 : {'P34758'},
854508 : {'P15801'},
854509 : {'P22203'},
854511 : {'Q01926'},
854513 : {'P40825'},
854514 : {'P22023'},
854515 : {'P47988'},
854516 : {'Q99326'},
854517 : {'P52492'},
854518 : {'P46669'},
854519 : {'P10964'},
854520 : {'Q12182'},
854522 : {'Q12293'},
854525 : {'P33122'},
854527 : {'P12689'},
854529 : {'P52489'},
854530 : {'P15380'},
854531 : {'P40987'},
854532 : {'P24720'},
854533 : {'P24719'},
854534 : {'Q08816'},
854535 : {'Q08817'},
854536 : {'Q08818'},
854537 : {'P41913'},
854538 : {'Q08822'},
854539 : {'Q08826'},
854540 : {'Q02516'},
854541 : {'Q08831'},
854542 : {'P06776'},
854543 : {'P06103'},
854544 : {'P21242'},
854545 : {'P52960'},
854548 : {'Q08844'},
854549 : {'Q08873'},
854550 : {'P48581'},
854551 : {'P48589'},
854552 : {'P32864'},
854553 : {'Q08886'},
854554 : {'Q08887'},
854555 : {'P32336'},
854556 : {'P46367'},
854557 : {'P07262'},
854559 : {'P40353'},
854560 : {'Q08902'},
854562 : {'Q08904'},
854563 : {'Q08905'},
854564 : {'Q08906'},
854565 : {'Q08907'},
854566 : {'Q08908'},
854567 : {'Q08909'},
854568 : {'P05066'},
854569 : {'Q08910'},
854570 : {'Q08911'},
854571 : {'Q08912'},
854572 : {'Q08913'},
854573 : {'Q08914'},
854575 : {'P0CX10', 'P0CX11'},
854576 : {'P0CE86', 'P0CE87'},
854577 : {'P0CX20', 'P0CX21', 'P0CX22'},
854582 : {'P03879'},
854583 : {'P00163'},
854584 : {'P61829'},
854586 : {'P02381'},
854590 : {'P03882'},
854593 : {'P03875'},
854594 : {'P03876'},
854595 : {'P03877'},
854596 : {'P03878'},
854597 : {'Q9ZZX1'},
854598 : {'P00401'},
854599 : {'Q9ZZX0'},
854600 : {'P00856'},
854601 : {'P00854'},
854604 : {'P03873'},
854605 : {'Q9ZZW7'},
854622 : {'P00410'},
854623 : {'P03881'},
854627 : {'P00420'},
854630 : {'P40434'},
854631 : {'P0CE88', 'P0CE89'},
854634 : {'P40438'},
854635 : {'P0CW40', 'P0CW41'},
854637 : {'P40442'},
854640 : {'P40445'},
854641 : {'P40446'},
854642 : {'P40447'},
854643 : {'P40448'},
854644 : {'P00724'},
854645 : {'P40449'},
854646 : {'P27796'},
854647 : {'P40450'},
854648 : {'P40451'},
854649 : {'P40452'},
854650 : {'P40453'},
854651 : {'P32191'},
854652 : {'P32351'},
854653 : {'P40454'},
854654 : {'P40455'},
854655 : {'P40456'},
854656 : {'P32354'},
854657 : {'P40457'},
854658 : {'P0CH08', 'P0CH09'},
854659 : {'P39928'},
854660 : {'P40458'},
854661 : {'P40459'},
854662 : {'P40460'},
854663 : {'Q00578'},
854664 : {'P39076'},
854666 : {'P38928'},
854667 : {'P38927'},
854668 : {'P40414'},
854669 : {'P40462'},
854670 : {'P16547'},
854671 : {'P40463'},
854672 : {'P40464'},
854673 : {'P26784'},
854674 : {'P40465'},
854675 : {'P40466'},
854676 : {'P40467'},
854677 : {'P40468'},
854678 : {'P40469'},
854679 : {'P40470'},
854680 : {'P32597'},
854681 : {'P20967'},
854682 : {'P40471'},
854683 : {'P40472'},
854684 : {'P40473'},
854685 : {'P40474'},
854686 : {'P40475'},
854687 : {'P23250'},
854688 : {'Q00245'},
854689 : {'P40476'},
854690 : {'P07172'},
854691 : {'P40477'},
854692 : {'P40478'},
854693 : {'P40479'},
854694 : {'P40480'},
854695 : {'P00425'},
854696 : {'P40481'},
854697 : {'P40482'},
854698 : {'P40483'},
854699 : {'P40433'},
854700 : {'P40484'},
854701 : {'P40485'},
854702 : {'P40486'},
854703 : {'P40487'},
854704 : {'P40488'},
854706 : {'P40489'},
854708 : {'P08019'},
854709 : {'P40491'},
854710 : {'P40492'},
854711 : {'P40493'},
854713 : {'P40494'},
854714 : {'P40495'},
854715 : {'P40496'},
854716 : {'P40497'},
854717 : {'P40498'},
854718 : {'P40499'},
854719 : {'P40500'},
854721 : {'P40501'},
854722 : {'P40502'},
854724 : {'P40504'},
854725 : {'P40505'},
854726 : {'P40506'},
854728 : {'Q7LHG5'},
854731 : {'P40507'},
854732 : {'P04801'},
854733 : {'P40508'},
854734 : {'P40509'},
854735 : {'P32565'},
854736 : {'P40510'},
854737 : {'P40511'},
854738 : {'P20050'},
854739 : {'P40512'},
854740 : {'P40513'},
854741 : {'P0CX31', 'P0CX32'},
854742 : {'P32844'},
854743 : {'P40514'},
854744 : {'P21672'},
854745 : {'P40515'},
854746 : {'P40516'},
854747 : {'P40517'},
854748 : {'P40518'},
854749 : {'Q00916'},
854750 : {'P40519'},
854753 : {'P40188'},
854755 : {'P40522'},
854756 : {'P40523'},
854757 : {'P40524'},
854758 : {'P41277'},
854759 : {'P40525'},
854760 : {'P40185'},
854761 : {'P40186'},
854762 : {'P40526'},
854763 : {'P40527'},
854764 : {'P40528'},
854765 : {'P39014'},
854766 : {'P40187'},
854767 : {'P40529'},
854768 : {'P38626'},
854769 : {'P40530'},
854770 : {'P40531'},
854771 : {'P40532'},
854772 : {'P40533'},
854773 : {'P06102'},
854774 : {'P40534'},
854775 : {'P40535'},
854776 : {'P15790'},
854777 : {'P13517'},
854778 : {'P07278'},
854780 : {'P40537'},
854781 : {'P40318'},
854783 : {'P40538'},
854785 : {'P40540'},
854786 : {'P40541'},
854788 : {'P40543'},
854789 : {'P40544'},
854790 : {'Q01852'},
854791 : {'P16370'},
854792 : {'P40545'},
854793 : {'P40546'},
854794 : {'P0CX45', 'P0CX46'},
854795 : {'P40547'},
854796 : {'P40548'},
854797 : {'P12630'},
854799 : {'Q02598'},
854801 : {'P40549'},
854802 : {'P40550'},
854804 : {'P40552'},
854805 : {'P40553'},
854806 : {'Q03096'},
854808 : {'P39002'},
854809 : {'P40554'},
854810 : {'P40555'},
854811 : {'P40556'},
854812 : {'P40557'},
854813 : {'P22804'},
854814 : {'P40558'},
854815 : {'P40559'},
854816 : {'P40560'},
854817 : {'P40561'},
854818 : {'P40562'},
854819 : {'P40563'},
854820 : {'P40564'},
854821 : {'P40565'},
854822 : {'P32521'},
854824 : {'P40566'},
854825 : {'P10363'},
854826 : {'P40567'},
854827 : {'P40568'},
854828 : {'P38637'},
854829 : {'P35184'},
854830 : {'P40569'},
854831 : {'P40570'},
854832 : {'P40571'},
854833 : {'P40572'},
854834 : {'P40573'},
854835 : {'P40574'},
854836 : {'P08640'},
854837 : {'P40575'},
854839 : {'P07266'},
854840 : {'P15367'},
854841 : {'P21657'},
854842 : {'P40576'},
854843 : {'P40577'},
854844 : {'Q02256'},
854845 : {'P32375'},
854846 : {'Q04895'},
854847 : {'P25335'},
854848 : {'P32460'},
854849 : {'P21826'},
854850 : {'P32459'},
854851 : {'P40578'},
854852 : {'P38998'},
854853 : {'P40579'},
854854 : {'P40580'},
854855 : {'P40581'},
854856 : {'P40582'},
854857 : {'P40583'},
854859 : {'P40585'},
854860 : {'P40586'},
854863 : {'Q03759'},
854864 : {'Q03760'},
854865 : {'P13298'},
854866 : {'P29478'},
854867 : {'Q01846'},
854868 : {'P52593'},
854869 : {'Q04199'},
854871 : {'Q04201'},
854872 : {'P38427'},
854874 : {'P05085'},
854875 : {'P11747'},
854876 : {'P54787'},
854877 : {'Q04489'},
854878 : {'P06838'},
854879 : {'Q04493'},
854881 : {'Q04500'},
854882 : {'P23639'},
854883 : {'Q02773'},
854886 : {'Q04511'},
854887 : {'Q04516'},
854888 : {'P54783'},
854889 : {'P09733'},
854891 : {'Q04526'},
854892 : {'Q04533'},
854893 : {'P81450'},
854894 : {'Q04545'},
854895 : {'P53759'},
854896 : {'Q03629'},
854897 : {'P25719'},
854898 : {'Q03630'},
854899 : {'Q03631'},
854900 : {'P12683'},
854901 : {'P38911'},
854902 : {'Q02326'},
854903 : {'Q03640'},
854904 : {'Q04632'},
854906 : {'Q03099'},
854907 : {'P0CX12', 'P0CX13'},
854908 : {'Q03102'},
854909 : {'Q03103'},
854910 : {'P39103'},
854911 : {'Q03104'},
854912 : {'Q03124'},
854913 : {'P54839'},
854914 : {'Q12746'},
854915 : {'P09734'},
854916 : {'P25297'},
854918 : {'Q00582'},
854919 : {'P32340'},
854920 : {'Q03208'},
854921 : {'Q03210'},
854922 : {'Q03735'},
854924 : {'P13090'},
854925 : {'P23642'},
854926 : {'Q03750'},
854927 : {'P13483'},
854928 : {'P46963'},
854929 : {'Q03758'},
854930 : {'P49017'},
854931 : {'P54786'},
854932 : {'P54838'},
854933 : {'Q04636'},
854934 : {'Q04638'},
854935 : {'Q04651'},
854936 : {'Q04658'},
854937 : {'P54784'},
854938 : {'P38987'},
854939 : {'P23248'},
854940 : {'P33441'},
854941 : {'P07271'},
854942 : {'P53397'},
854943 : {'Q04958'},
854944 : {'Q6Q5K6'},
854945 : {'Q04964'},
854946 : {'P14747'},
854948 : {'P50094'},
854949 : {'Q04969'},
854950 : {'P00175'},
854952 : {'Q04978'},
854953 : {'P54003'},
854954 : {'P04387'},
854955 : {'Q04689'},
854956 : {'Q04693'},
854957 : {'Q04697'},
854959 : {'Q04705'},
854960 : {'P39682'},
854962 : {'Q04706'},
854963 : {'Q04711'},
854964 : {'Q04712'},
854965 : {'P32796'},
854966 : {'Q03433'},
854968 : {'P0CX74', 'P0CX75', 'P0CX76'},
854969 : {'Q03434'},
854970 : {'Q03697'},
854971 : {'Q03703'},
854972 : {'Q03705'},
854973 : {'P15274'},
854974 : {'Q03707'},
854976 : {'P06778'},
854977 : {'P32500'},
854978 : {'Q03713'},
854979 : {'Q03714'},
854980 : {'P34760'},
854981 : {'P34161'},
854982 : {'P0CX55', 'P0CX56'},
854983 : {'P51998'},
854984 : {'P02407'},
854985 : {'Q03718'},
854986 : {'P49435'},
854987 : {'P12887'},
854988 : {'Q03722'},
854989 : {'Q03723'},
854990 : {'Q03730'},
854991 : {'P50109'},
854992 : {'P26570'},
854993 : {'Q04226'},
854994 : {'P49957'},
854995 : {'Q04228'},
854997 : {'P54837'},
854998 : {'Q04231'},
854999 : {'P27692'},
855002 : {'P36533'},
855003 : {'P25087'},
855004 : {'Q3E7A6'},
855005 : {'P19880'},
855006 : {'Q04233'},
855008 : {'Q04235'},
855009 : {'P50107'},
855010 : {'P0CF16'},
855011 : {'P0CF17'},
855012 : {'P32939'},
855013 : {'P32562'},
855014 : {'Q03667'},
855015 : {'Q03673'},
855016 : {'P40959'},
855017 : {'P50105'},
855018 : {'Q03674'},
855020 : {'P39105'},
855021 : {'Q03677'},
855022 : {'Q03687'},
855023 : {'P23585'},
855025 : {'Q03690'},
855026 : {'P20048'},
855028 : {'Q04347'},
855029 : {'P54781'},
855030 : {'P53438'},
855031 : {'Q04359'},
855032 : {'Q04364'},
855033 : {'P50104'},
855034 : {'P50264'},
855035 : {'P35192'},
855036 : {'Q02159'},
855037 : {'P32559'},
855039 : {'P36516'},
855040 : {'Q04368'},
855041 : {'Q04370'},
855042 : {'Q04371'},
855043 : {'Q04372'},
855044 : {'Q05040'},
855045 : {'Q05043'},
855047 : {'Q05050'},
855048 : {'Q05080'},
855049 : {'Q05123'},
855050 : {'Q05131'},
855051 : {'P46972'},
855052 : {'P23748'},
855053 : {'P33748'},
855054 : {'P40202'},
855055 : {'P54000'},
855056 : {'Q04210'},
855057 : {'Q04212'},
855059 : {'P07249'},
855060 : {'P11746'},
855061 : {'Q04213'},
855062 : {'Q04214'},
855063 : {'Q04215'},
855066 : {'Q02630'},
855067 : {'Q04659'},
855068 : {'Q04660'},
855070 : {'Q04670'},
855071 : {'P0CX65', 'P0CX66', 'P0CX67', 'P0CX68', 'P0CX69'},
855073 : {'P46671'},
855075 : {'P46679'},
855076 : {'P37296'},
855077 : {'P26448'},
855078 : {'P04710'},
855080 : {'P38993'},
855081 : {'Q04675'},
855082 : {'P50110'},
855083 : {'P25298'},
855084 : {'Q04728'},
855085 : {'Q04734'},
855086 : {'P32493'},
855087 : {'Q04746'},
855088 : {'Q04748'},
855089 : {'P54730'},
855090 : {'Q04749'},
855091 : {'Q04751'},
855092 : {'P54785'},
855093 : {'Q04767'},
855094 : {'Q02486'},
855095 : {'Q04772'},
855096 : {'Q04773'},
855097 : {'Q04779'},
855099 : {'Q04264'},
855101 : {'Q04272'},
855102 : {'P49956'},
855103 : {'P24280'},
855104 : {'P30771'},
855105 : {'P32488'},
855107 : {'P07246'},
855108 : {'A2P2R3'},
855109 : {'P0CF18'},
855110 : {'Q04279'},
855112 : {'Q04299'},
855113 : {'Q04301'},
855114 : {'P40341'},
855115 : {'Q04304'},
855116 : {'P32832'},
855117 : {'P46680'},
855118 : {'Q04305'},
855119 : {'P35203'},
855120 : {'Q03144'},
855121 : {'Q03148'},
855122 : {'Q03151'},
855123 : {'Q03153'},
855125 : {'Q03161'},
855126 : {'Q03162'},
855127 : {'Q03175'},
855128 : {'Q03177'},
855130 : {'P18961'},
855131 : {'P37012'},
855132 : {'Q04437'},
855134 : {'Q04438'},
855135 : {'P07342'},
855136 : {'Q04439'},
855137 : {'Q04458'},
855138 : {'Q04461'},
855139 : {'Q99278'},
855140 : {'Q12676'},
855141 : {'Q04471'},
855142 : {'Q04472'},
855143 : {'P38011'},
855144 : {'Q04477'},
855145 : {'Q04487'},
855147 : {'P54074'},
855149 : {'P38009'},
855150 : {'P54780'},
855152 : {'Q3E842'},
855153 : {'Q03880'},
855154 : {'P39523'},
855155 : {'P34160'},
855156 : {'Q04216'},
855157 : {'P40963'},
855158 : {'Q04217'},
855159 : {'P39685'},
855160 : {'Q04223'},
855161 : {'Q04225'},
855162 : {'P40206'},
855163 : {'P32841'},
855164 : {'P40207'},
855166 : {'P40208'},
855167 : {'P40209'},
855168 : {'P30620'},
855169 : {'P39110'},
855170 : {'P38615'},
855171 : {'P40210'},
855173 : {'P40212'},
855174 : {'P0CX51', 'P0CX52'},
855175 : {'P40214'},
855176 : {'P40215'},
855177 : {'P40217'},
855178 : {'P40218'},
855179 : {'P40219'},
855180 : {'Q02795'},
855182 : {'P28627'},
855183 : {'P28625'},
855184 : {'Q03790'},
855186 : {'Q03792'},
855187 : {'Q03795'},
855188 : {'Q03796'},
855189 : {'Q03798'},
855190 : {'Q03799'},
855192 : {'Q3E843'},
855194 : {'Q03818'},
855195 : {'Q03823'},
855196 : {'P48353'},
855197 : {'Q12674'},
855198 : {'Q03824'},
855200 : {'Q03825'},
855201 : {'P32567'},
855202 : {'Q03829'},
855203 : {'P38920'},
855204 : {'P40969'},
855205 : {'P54114'},
855206 : {'P47771'},
855207 : {'Q03212'},
855208 : {'Q03213'},
855210 : {'P18899'},
855212 : {'P01094'},
855213 : {'P50263'},
855214 : {'Q03214'},
855215 : {'Q03218'},
855216 : {'Q03219'},
855217 : {'P35209'},
855218 : {'Q03220'},
855219 : {'Q03231'},
855220 : {'Q00453'},
855221 : {'P39926'},
855222 : {'Q03233'},
855223 : {'Q12751'},
855224 : {'P15108'},
855225 : {'Q03236'},
855226 : {'Q03246'},
855227 : {'P49095'},
855228 : {'P35187'},
855229 : {'P42933'},
855230 : {'Q04322'},
855231 : {'P36525'},
855232 : {'P05745'},
855235 : {'Q04329'},
855236 : {'Q04336'},
855237 : {'Q04338'},
855238 : {'Q01649'},
855239 : {'P20437'},
855240 : {'Q03691'},
855241 : {'P28519'},
855242 : {'P32352'},
855243 : {'P23644'},
855244 : {'Q03694'},
855245 : {'P16862'},
855246 : {'Q03695'},
855247 : {'P32874'},
855248 : {'P07277'},
855249 : {'Q03648'},
855250 : {'Q03649'},
855251 : {'Q03652'},
855252 : {'Q03653'},
855253 : {'Q03654'},
855254 : {'P25303'},
855255 : {'Q03655'},
855256 : {'Q03656'},
855257 : {'P38625'},
855258 : {'Q03660'},
855259 : {'Q03661'},
855260 : {'P24521'},
855261 : {'Q04991'},
855262 : {'Q05015'},
855263 : {'P50102'},
855264 : {'P32829'},
855265 : {'P19956'},
855266 : {'Q05016'},
855267 : {'Q05021'},
855268 : {'P14908'},
855269 : {'Q05022'},
855270 : {'P46784'},
855271 : {'P12868'},
855272 : {'Q05670'},
855273 : {'Q05024'},
855274 : {'Q04740'},
855275 : {'P11745'},
855276 : {'Q05027'},
855277 : {'Q05029'},
855278 : {'Q05031'},
855280 : {'Q02555'},
855281 : {'Q02554'},
855282 : {'Q04013'},
855283 : {'P0CX23', 'P0CX24'},
855284 : {'P20107'},
855285 : {'Q04018'},
855287 : {'Q3E846'},
855288 : {'P47912'},
855289 : {'Q04781'},
855291 : {'Q04792'},
855292 : {'Q04806'},
855293 : {'Q05827'},
855294 : {'Q04814'},
855295 : {'Q04835'},
855297 : {'Q04839'},
855298 : {'P10174'},
855299 : {'P08468'},
855300 : {'Q04847'},
855301 : {'Q03496'},
855302 : {'P38912'},
855303 : {'P38426'},
855304 : {'P38430'},
855305 : {'P38429'},
855306 : {'P38428'},
855307 : {'Q03508'},
855308 : {'Q03516'},
855309 : {'P28239'},
855310 : {'P49960'},
855311 : {'Q03525'},
855312 : {'P53437'},
855313 : {'P30402'},
855315 : {'Q03529'},
855316 : {'P50111'},
855317 : {'Q03530'},
855318 : {'P48524'},
855319 : {'P48510'},
855320 : {'Q03254'},
855321 : {'Q03262'},
855322 : {'Q03263'},
855323 : {'P39113'},
855324 : {'P23797'},
855325 : {'P22136'},
855326 : {'P23796'},
855328 : {'P32807'},
855329 : {'Q03264'},
855330 : {'P20084'},
855331 : {'P39112'},
855332 : {'P49955'},
855333 : {'Q03266'},
855335 : {'Q03532'},
855336 : {'Q03533'},
855337 : {'Q03554'},
855338 : {'Q03557'},
855339 : {'P36224'},
855341 : {'Q03559'},
855342 : {'P25045'},
855343 : {'P00729'},
855344 : {'Q03579'},
855345 : {'Q04949'},
855346 : {'P04046'},
855347 : {'P40416'},
855348 : {'P32843'},
855349 : {'P00331'},
855350 : {'P50101'},
855352 : {'Q04951'},
855353 : {'Q04952'},
855355 : {'P22146'},
855356 : {'P32337'},
855357 : {'P32497'},
855358 : {'Q04867'},
855359 : {'P41818'},
855360 : {'Q04868'},
855361 : {'P40308'},
855362 : {'P40302'},
855363 : {'Q04869'},
855364 : {'P54005'},
855365 : {'I2HB70'},
855366 : {'Q04893'},
855368 : {'Q04894'},
855369 : {'P40988'},
855371 : {'Q04898'},
855372 : {'Q04902'},
855373 : {'P42222'},
855375 : {'P0CE85'},
855377 : {'P53819'},
855380 : {'P53822'},
855381 : {'P0CH63', 'P0CH64'},
855382 : {'P53823'},
855383 : {'P53824'},
855384 : {'P42883'},
855385 : {'P42884'},
855386 : {'P32561'},
855387 : {'P33760'},
855388 : {'P42834'},
855389 : {'P42835'},
855390 : {'P42836'},
855392 : {'P42837'},
855393 : {'P42838'},
855394 : {'P17260'},
855395 : {'P42839'},
855396 : {'P42840'},
855398 : {'P42833'},
855399 : {'P42841'},
855400 : {'P32452'},
855401 : {'P32453'},
855402 : {'P21705'},
855403 : {'P42842'},
855404 : {'P26754'},
855405 : {'P42843'},
855406 : {'P42844'},
855407 : {'P42845'},
855408 : {'P42846'},
855409 : {'P21965'},
855410 : {'P42847'},
855411 : {'P48558'},
855412 : {'P48559'},
855414 : {'P07281'},
855415 : {'P0CX49', 'P0CX50'},
855416 : {'P48560'},
855417 : {'P48561'},
855418 : {'P48562'},
855420 : {'P48563'},
855421 : {'P48564'},
855422 : {'P48565'},
855423 : {'P48566'},
855424 : {'P48567'},
855425 : {'P41821'},
855426 : {'P38629'},
855427 : {'P24867'},
855428 : {'P53829'},
855429 : {'P32074'},
855430 : {'P53830'},
855433 : {'Q12112'},
855434 : {'Q12391'},
855436 : {'P36520'},
855438 : {'P53832'},
855439 : {'P53833'},
855440 : {'P53834'},
855441 : {'P32462'},
855442 : {'P53835'},
855443 : {'P53836'},
855444 : {'P08465'},
855446 : {'P53838'},
855447 : {'P53839'},
855448 : {'P53840'},
855449 : {'P17065'},
855450 : {'P41832'},
855451 : {'P38971'},
855452 : {'P53841'},
855453 : {'P32487'},
855454 : {'P39104'},
855456 : {'P53843'},
855457 : {'P53844'},
855458 : {'P53845'},
855459 : {'P21951'},
855460 : {'P50874'},
855461 : {'P53846'},
855462 : {'P38636'},
855463 : {'P53847'},
855464 : {'P38717'},
855465 : {'P53848'},
855466 : {'P53849'},
855467 : {'P53850'},
855468 : {'P53851'},
855469 : {'P36528'},
855470 : {'P53617'},
855471 : {'P12753'},
855472 : {'P53583'},
855473 : {'Q01080'},
855474 : {'P53852'},
855475 : {'P53853'},
855476 : {'P53854'},
855477 : {'P32911'},
855478 : {'P33338'},
855479 : {'P53855'},
855480 : {'P11412'},
855481 : {'P23503'},
855482 : {'Q01532'},
855483 : {'P13134'},
855484 : {'P53584'},
855485 : {'P32259'},
855487 : {'P53857'},
855488 : {'P53858'},
855489 : {'P53859'},
855490 : {'P53860'},
855491 : {'P53861'},
855492 : {'P23202'},
855495 : {'P53863'},
855496 : {'P53865'},
855497 : {'P53866'},
855498 : {'P53867'},
855499 : {'P53538'},
855500 : {'P41812'},
855501 : {'P80210'},
855502 : {'P53868'},
855503 : {'P40151'},
855504 : {'P40152'},
855505 : {'P11938'},
855506 : {'P40154'},
855507 : {'P40155'},
855508 : {'P40156'},
855509 : {'P40157'},
855510 : {'P53869'},
855511 : {'P16523'},
855512 : {'P40150'},
855513 : {'P40159'},
855514 : {'P40160'},
855515 : {'P40161'},
855517 : {'P32572'},
855518 : {'P32573'},
855520 : {'P40164'},
855521 : {'P40165'},
855522 : {'Q01722'},
855524 : {'P34761'},
855525 : {'P40167'},
855526 : {'P40168'},
855527 : {'P40169'},
855528 : {'P53870'},
855529 : {'P08004'},
855530 : {'P53871'},
855531 : {'P53872'},
855532 : {'Q02821'},
855533 : {'P11927'},
855534 : {'P53873'},
855535 : {'P53874'},
855536 : {'P53875'},
855537 : {'P53876'},
855538 : {'P22211'},
855539 : {'P53877'},
855540 : {'P53878'},
855541 : {'P53879'},
855543 : {'P05750'},
855544 : {'P53881'},
855545 : {'P53882'},
855547 : {'P53883'},
855548 : {'P53885'},
855549 : {'P53886'},
855552 : {'P39006'},
855553 : {'P53889'},
855554 : {'Q02100'},
855555 : {'P53890'},
855556 : {'P53891'},
855557 : {'P53892'},
855558 : {'P53893'},
855559 : {'Q3E7A8'},
855560 : {'P0CX27', 'P0CX28'},
855561 : {'P53894'},
855562 : {'P38616'},
855563 : {'P53895'},
855564 : {'P53896'},
855565 : {'P53897'},
855566 : {'P53898'},
855567 : {'P53899'},
855568 : {'P23292'},
855569 : {'P53900'},
855570 : {'P53901'},
855571 : {'P17890'},
855573 : {'P53903'},
855574 : {'P53904'},
855575 : {'P53905'},
855576 : {'P53906'},
855577 : {'P34166'},
855578 : {'P53907'},
855579 : {'P53908'},
855580 : {'P41948'},
855581 : {'P53909'},
855582 : {'P53910'},
855583 : {'P53552'},
855584 : {'P17555'},
855585 : {'P27929'},
855586 : {'P53911'},
855587 : {'P20081'},
855588 : {'P53912'},
855589 : {'P53913'},
855591 : {'P53914'},
855592 : {'P49334'},
855593 : {'P17898'},
855594 : {'P53915'},
855595 : {'P53916'},
855596 : {'P53917'},
855597 : {'P53540'},
855598 : {'P53918'},
855599 : {'P53919'},
855600 : {'P53920'},
855601 : {'P53921'},
855602 : {'P07213'},
855603 : {'P53923'},
855605 : {'P53550'},
855606 : {'P30952'},
855607 : {'P53924'},
855608 : {'P53925'},
855609 : {'P28000'},
855611 : {'P24783'},
855612 : {'P40312'},
855613 : {'P53927'},
855615 : {'P53929'},
855616 : {'P53930'},
855618 : {'P50942'},
855619 : {'P06208'},
855620 : {'P32389'},
855621 : {'P13382'},
855622 : {'P50944'},
855623 : {'P50945'},
855624 : {'P50946'},
855625 : {'P01120'},
855626 : {'P50947'},
855628 : {'P48164'},
855629 : {'P53932'},
855630 : {'P53933'},
855631 : {'P36019'},
855632 : {'P53934'},
855633 : {'P53935'},
855634 : {'P06781'},
855636 : {'P06786'},
855637 : {'P48231'},
855638 : {'P48232'},
855639 : {'P40850'},
855640 : {'P39013'},
855641 : {'D6W196'},
855642 : {'P14242'},
855643 : {'P53937'},
855644 : {'P53938'},
855645 : {'P17536'},
855646 : {'P53939'},
855647 : {'P53940'},
855648 : {'P34072'},
855649 : {'P53941'},
855650 : {'P32047'},
855651 : {'P32048'},
855652 : {'P53942'},
855653 : {'P12695'},
855654 : {'P53507'},
855655 : {'P26785'},
855656 : {'P41813'},
855658 : {'P51401'},
855659 : {'P53616'},
855660 : {'P53943'},
855661 : {'P25491'},
855662 : {'P53944'},
855663 : {'P41814'},
855664 : {'P40991'},
855665 : {'P53946'},
855667 : {'P53947'},
855668 : {'P53949'},
855669 : {'P04840'},
855671 : {'Q12470'},
855672 : {'Q99337'},
855673 : {'P53950'},
855674 : {'P38590'},
855675 : {'P00424'},
855676 : {'P53951'},
855677 : {'P53952'},
855678 : {'P53953'},
855679 : {'P53954'},
855680 : {'P53955'},
855681 : {'P53956'},
855682 : {'Q10740'},
855683 : {'P53633'},
855686 : {'P53958'},
855687 : {'P53959'},
855688 : {'P53960'},
855689 : {'P46678'},
855690 : {'P53961'},
855691 : {'P28834'},
855692 : {'P53615'},
855695 : {'P53962'},
855697 : {'P53963'},
855698 : {'P53964'},
855699 : {'P53965'},
855700 : {'P61830'},
855701 : {'P02309'},
855703 : {'P53966'},
855704 : {'P53968'},
855705 : {'P53969'},
855706 : {'P47821'},
855707 : {'P53970'},
855708 : {'P53971'},
855709 : {'P53972'},
855710 : {'P53973'},
855711 : {'P53974'},
855712 : {'P53975'},
855713 : {'P53976'},
855716 : {'P32588'},
855717 : {'P0CT04'},
855718 : {'P53978'},
855720 : {'P53541'},
855721 : {'P53980'},
855722 : {'P53981'},
855723 : {'P53982'},
855724 : {'P53983'},
855725 : {'P25294'},
855726 : {'P41318'},
855727 : {'P12687'},
855728 : {'P38922'},
855729 : {'P38921'},
855730 : {'P40693'},
855731 : {'P33309'},
855732 : {'P00890'},
855736 : {'P32907'},
855737 : {'P32910'},
855738 : {'P40342'},
855739 : {'P40343'},
855741 : {'P40344'},
855742 : {'P40345'},
855743 : {'P53718'},
855744 : {'P33308'},
855745 : {'P20095'},
855746 : {'P27515'},
855747 : {'P27514'},
855748 : {'P53719'},
855749 : {'P53720'},
855750 : {'Q00955'},
855751 : {'P32897'},
855752 : {'P53721'},
855753 : {'P53629'},
855754 : {'P53722'},
855755 : {'P53723'},
855756 : {'P53724'},
855757 : {'P53628'},
855758 : {'P53725'},
855760 : {'P11655'},
855761 : {'P53727'},
855762 : {'P53728'},
855763 : {'P53729'},
855764 : {'P53730'},
855765 : {'P53599'},
855766 : {'P32838'},
855767 : {'Q6Q546'},
855768 : {'P37254'},
855769 : {'P50278'},
855770 : {'Q3E841'},
855771 : {'P53731'},
855772 : {'P53732'},
855773 : {'P53733'},
855774 : {'P53734'},
855775 : {'P53735'},
855776 : {'P53736'},
855778 : {'P32378'},
855779 : {'P32377'},
855780 : {'P32323'},
855781 : {'P07390'},
855782 : {'P53738'},
855783 : {'P53739'},
855784 : {'P53740'},
855785 : {'P53604'},
855786 : {'P38999'},
855787 : {'P53741'},
855788 : {'P39008'},
855789 : {'P53742'},
855790 : {'P53743'},
855792 : {'P53389'},
855793 : {'P53744'},
855794 : {'P53630'},
855795 : {'P50277'},
855796 : {'P53745'},
855797 : {'P53746'},
855798 : {'P53747'},
855799 : {'P53748'},
855800 : {'P53749'},
855801 : {'P53750'},
855802 : {'P53751'},
855803 : {'P53752'},
855804 : {'P53753'},
855805 : {'P53754'},
855806 : {'P53755'},
855807 : {'P53756'},
855808 : {'P53757'},
855809 : {'P53631'},
855810 : {'P0CX08', 'P0CX09'},
855811 : {'P52923'},
855812 : {'P52924'},
855813 : {'P0CE90', 'P0CE91'},
855814 : {'P53758'},
855816 : {'P0CX70', 'P0CX71', 'P0CX72', 'P0CX73'},
855817 : {'Q12414'},
855818 : {'Q08974'},
855819 : {'P20438'},
855820 : {'Q12365'},
855821 : {'Q12060'},
855822 : {'Q12045'},
855824 : {'Q12184'},
855825 : {'Q12048'},
855826 : {'O14455'},
855827 : {'Q12344'},
855828 : {'P04386'},
855829 : {'Q12523'},
855830 : {'Q12270'},
855831 : {'Q12179'},
855832 : {'Q12520'},
855833 : {'P38687'},
855834 : {'Q12280'},
855835 : {'P46670'},
855836 : {'P02829'},
855837 : {'P46683'},
855838 : {'P09064'},
855840 : {'Q12003'},
855841 : {'Q12464'},
855842 : {'P32842'},
855843 : {'Q12143'},
855844 : {'P32867'},
855845 : {'P19097'},
855846 : {'P0CX14', 'P0CX15'},
855847 : {'P0CE86', 'P0CE87'},
855848 : {'P0CX10', 'P0CX11'},
855849 : {'Q08992'},
855850 : {'Q08991'},
855851 : {'Q08990'},
855852 : {'Q08989'},
855854 : {'Q08986'},
855855 : {'Q08985'},
855856 : {'Q08984'},
855857 : {'P21306'},
855858 : {'P33311'},
855859 : {'P32526'},
855860 : {'P32383'},
855861 : {'Q08981'},
855862 : {'P41819'},
855863 : {'P53388'},
855864 : {'Q08980'},
855865 : {'Q08979'},
855866 : {'P08417'},
855867 : {'Q08977'},
855869 : {'Q00776'},
855870 : {'Q08975'},
855871 : {'Q12132'},
855872 : {'Q99395'},
855873 : {'O13297'},
855874 : {'P40350'},
855875 : {'Q08972'},
855876 : {'Q08971'},
855877 : {'Q08970'},
855878 : {'Q08969'},
855879 : {'Q08968'},
855880 : {'Q08967'},
855881 : {'P0CX43', 'P0CX44'},
855882 : {'Q08966'},
855883 : {'P20606'},
855884 : {'Q08965'},
855885 : {'Q08964'},
855886 : {'P21560'},
855887 : {'P41835'},
855888 : {'Q08963'},
855889 : {'Q12211'},
855890 : {'Q08962'},
855891 : {'P38688'},
855892 : {'P38991'},
855893 : {'Q08961'},
855894 : {'Q08960'},
855895 : {'Q08959'},
855897 : {'P29295'},
855898 : {'P06245'},
855899 : {'Q08957'},
855900 : {'Q08956'},
855901 : {'Q08955'},
855902 : {'Q08954'},
855903 : {'Q12213'},
855905 : {'Q08952'},
855906 : {'Q08951'},
855907 : {'Q08949'},
855908 : {'Q08932'},
855909 : {'Q08931'},
855910 : {'Q08930'},
855911 : {'P38996'},
855912 : {'Q08929'},
855913 : {'Q06892'},
855914 : {'P01149'},
855916 : {'Q08926'},
855917 : {'Q08925'},
855918 : {'O14464'},
855919 : {'Q08924'},
855920 : {'Q08923'},
855922 : {'Q08921'},
855923 : {'P32945'},
855925 : {'Q08920'},
855926 : {'P41817'},
855927 : {'Q08919'},
855928 : {'P32363'},
855929 : {'P33420'},
855930 : {'P36534'},
855931 : {'P21592'},
855932 : {'P41816'},
855933 : {'Q12091'},
855934 : {'Q99257'},
855935 : {'Q12467'},
855936 : {'P14284'},
855937 : {'Q12092'},
855938 : {'Q12529'},
855939 : {'Q12083'},
855940 : {'Q12254'},
855941 : {'Q12042'},
855942 : {'P39011'},
855943 : {'P26637'},
855944 : {'Q99373'},
855945 : {'Q99299'},
855946 : {'Q12052'},
855947 : {'Q12498'},
855948 : {'P28743'},
855949 : {'P07267'},
855950 : {'P22216'},
855951 : {'Q12461'},
855952 : {'Q12417'},
855953 : {'Q12152'},
855954 : {'Q12380'},
855955 : {'Q12036'},
855956 : {'P41909'},
855957 : {'Q12080'},
855958 : {'P35844'},
855959 : {'Q12245'},
855960 : {'P05744'},
855962 : {'Q03002'},
855963 : {'P32491'},
855964 : {'Q03010'},
855965 : {'Q03012'},
855967 : {'Q03016'},
855968 : {'Q03020'},
855969 : {'Q03028'},
855970 : {'P19541'},
855971 : {'P19516'},
855972 : {'P26321'},
855973 : {'Q03029'},
855974 : {'P35189'},
855975 : {'Q02457'},
855976 : {'P53551'},
855977 : {'Q02931'},
855978 : {'Q02932'},
855979 : {'P33419'},
855980 : {'Q02933'},
855981 : {'Q02939'},
855982 : {'P32489'},
855983 : {'Q02948'},
855984 : {'P24784'},
855985 : {'Q02950'},
855986 : {'P15496'},
855987 : {'Q02959'},
855988 : {'P32873'},
855990 : {'Q02961'},
855991 : {'Q02969'},
855993 : {'P00812'},
855994 : {'Q02979'},
855995 : {'Q02981'},
855996 : {'Q02872'},
855997 : {'Q02873'},
855998 : {'P32589'},
855999 : {'Q02875'},
856000 : {'P15179'},
856001 : {'Q02883'},
856002 : {'Q02884'},
856004 : {'Q02887'},
856005 : {'Q02888'},
856006 : {'Q02889'},
856007 : {'P48527'},
856008 : {'P62651'},
856009 : {'Q02890'},
856010 : {'Q02891'},
856011 : {'P21825'},
856012 : {'Q02892'},
856013 : {'P41930'},
856014 : {'P41921'},
856015 : {'P0CX37', 'P0CX38'},
856016 : {'Q12224'},
856017 : {'Q02895'},
856018 : {'Q02896'},
856019 : {'Q02908'},
856020 : {'P48415'},
856021 : {'P48582'},
856022 : {'Q02825'},
856023 : {'P32333'},
856024 : {'O13516'},
856026 : {'Q12672'},
856027 : {'P05626'},
856028 : {'Q02831'},
856029 : {'P46961'},
856030 : {'P07261'},
856031 : {'P40328'},
856032 : {'Q02863'},
856034 : {'Q02864'},
856035 : {'Q02866'},
856036 : {'Q12051'},
856037 : {'Q02749'},
856038 : {'Q02754'},
856039 : {'Q12194'},
856040 : {'Q02767'},
856041 : {'Q02770'},
856042 : {'Q02776'},
856044 : {'P54115'},
856045 : {'Q02783'},
856047 : {'A0A0B7P3V8'},
856048 : {'Q02784'},
856049 : {'Q02785'},
856050 : {'P33300'},
856051 : {'Q02786'},
856052 : {'Q02796'},
856053 : {'Q02799'},
856054 : {'P54070'},
856055 : {'Q02803'},
856056 : {'Q02804'},
856057 : {'P39107'},
856058 : {'Q03063'},
856059 : {'P29547'},
856060 : {'Q03067'},
856061 : {'Q03071'},
856062 : {'Q03308'},
856063 : {'P37838'},
856065 : {'P39073'},
856066 : {'Q03079'},
856067 : {'P48526'},
856068 : {'Q03080'},
856069 : {'Q03081'},
856070 : {'Q02642'},
856071 : {'P19657'},
856072 : {'Q03083'},
856074 : {'Q03085'},
856075 : {'Q03088'},
856076 : {'P17157'},
856077 : {'Q02648'},
856078 : {'P32580'},
856079 : {'P41338'},
856080 : {'Q02651'},
856081 : {'Q12505'},
856083 : {'Q02685'},
856084 : {'P46151'},
856085 : {'P06777'},
856086 : {'Q02710'},
856087 : {'Q02724'},
856088 : {'Q02725'},
856089 : {'Q02732'},
856090 : {'Q02733'},
856091 : {'P09547'},
856092 : {'P53686'},
856093 : {'Q02606'},
856094 : {'Q02608'},
856095 : {'Q12754'},
856096 : {'Q12297'},
856097 : {'P53600'},
856098 : {'Q12532'},
856099 : {'P22516'},
856100 : {'Q12308'},
856101 : {'Q12200'},
856102 : {'Q12089'},
856103 : {'Q12230'},
856104 : {'Q12059'},
856105 : {'Q12483'},
856106 : {'Q12341'},
856107 : {'P43635'},
856108 : {'Q12428'},
856111 : {'P53394'},
856112 : {'Q12480'},
856113 : {'Q01766'},
856114 : {'Q12031'},
856115 : {'Q12188'},
856117 : {'Q12753'},
856118 : {'Q12286'},
856119 : {'P22138'},
856121 : {'Q12251'},
856123 : {'Q12145'},
856125 : {'Q12531'},
856126 : {'Q12522'},
856128 : {'P32601'},
856129 : {'Q12495'},
856130 : {'P30665'},
856131 : {'Q12233'},
856132 : {'Q12482'},
856133 : {'Q12139'},
856134 : {'Q12432'},
856135 : {'P32795'},
856136 : {'P37366'},
856137 : {'P48016'},
856138 : {'Q12079'},
856140 : {'Q12402'},
856141 : {'Q12028'},
856142 : {'Q12734'},
856143 : {'Q12311'},
856144 : {'Q12038'},
856145 : {'P07263'},
856146 : {'Q12406'},
856147 : {'P32288'},
856148 : {'P41807'},
856149 : {'Q3E752'},
856152 : {'Q12284'},
856153 : {'Q12199'},
856154 : {'P38431'},
856155 : {'Q12221'},
856156 : {'P0CX25', 'P0CX26'},
856158 : {'Q12049'},
856159 : {'Q12262'},
856160 : {'P08425'},
856161 : {'Q12181'},
856162 : {'Q12527'},
856163 : {'Q03503'},
856165 : {'P11632'},
856167 : {'P41808'},
856168 : {'P32855'},
856169 : {'Q12004'},
856170 : {'Q99177'},
856171 : {'P32331'},
856173 : {'P32178'},
856174 : {'Q12350'},
856175 : {'Q12178'},
856176 : {'Q12160'},
856178 : {'P25042'},
856179 : {'Q99344'},
856180 : {'Q12425'},
856181 : {'Q12214'},
856182 : {'Q12074'},
856183 : {'Q12321'},
856184 : {'Q12346'},
856186 : {'Q12514'},
856187 : {'P40347'},
856188 : {'P23254'},
856191 : {'Q06810'},
856193 : {'Q06813'},
856194 : {'Q06815'},
856195 : {'P02994'},
856196 : {'Q06817'},
856197 : {'Q06819'},
856198 : {'Q06820'},
856199 : {'Q06821'},
856200 : {'Q06822'},
856201 : {'P29055'},
856203 : {'P20424'},
856204 : {'O13585'},
856207 : {'Q06833'},
856208 : {'Q06834'},
856209 : {'Q06835'},
856210 : {'Q06836'},
856211 : {'O13587'},
856212 : {'Q06839'},
856213 : {'Q06089'},
856214 : {'Q06090'},
856216 : {'Q06091'},
856217 : {'P0C0W9'},
856218 : {'P30656'},
856219 : {'P39521'},
856220 : {'Q06096'},
856221 : {'Q06098'},
856222 : {'Q06102'},
856223 : {'Q06103'},
856225 : {'Q06104'},
856226 : {'P07703'},
856227 : {'P32328'},
856228 : {'Q06106'},
856229 : {'P06197'},
856230 : {'Q06107'},
856231 : {'Q06108'},
856232 : {'Q06109'},
856233 : {'Q06116'},
856234 : {'Q06489'},
856236 : {'P24869'},
856237 : {'P30283'},
856239 : {'Q06490'},
856240 : {'P40851'},
856241 : {'P49573'},
856243 : {'Q06493'},
856245 : {'Q06494'},
856246 : {'Q06497'},
856247 : {'P45978'},
856249 : {'Q06504'},
856250 : {'P0CX29', 'P0CX30'},
856251 : {'Q06505'},
856252 : {'P80967'},
856253 : {'P08593'},
856254 : {'Q01454'},
856255 : {'Q06506'},
856257 : {'P0C2I9'},
856258 : {'P0CX57', 'P0CX58'},
856260 : {'P53390'},
856261 : {'Q06508'},
856262 : {'Q06510'},
856263 : {'P17119'},
856264 : {'Q06511'},
856267 : {'Q06512'},
856268 : {'P49089'},
856270 : {'Q06522'},
856271 : {'Q06523'},
856272 : {'Q12207'},
856274 : {'Q06524'},
856275 : {'Q06525'},
856276 : {'Q06537'},
856277 : {'Q06449'},
856278 : {'Q12374'},
856279 : {'Q06451'},
856280 : {'Q06466'},
856281 : {'Q06469'},
856282 : {'P0CX59', 'P0CX60'},
856283 : {'P0C2J0'},
856284 : {'P0C2J1'},
856285 : {'Q6Q5H1'},
856287 : {'P32486'},
856289 : {'P06738'},
856290 : {'P23293'},
856291 : {'P54791'},
856292 : {'P34167'},
856293 : {'Q06211'},
856294 : {'P06780'},
856295 : {'P10663'},
856296 : {'P18408'},
856297 : {'Q06213'},
856298 : {'Q06214'},
856301 : {'Q06604'},
856302 : {'Q06608'},
856303 : {'P52917'},
856304 : {'Q06616'},
856305 : {'P24482'},
856306 : {'P20133'},
856307 : {'P20053'},
856309 : {'Q06623'},
856310 : {'Q06624'},
856311 : {'P15303'},
856312 : {'P54999'},
856313 : {'P14020'},
856314 : {'Q06625'},
856315 : {'Q06628'},
856316 : {'P39933'},
856317 : {'P20435'},
856318 : {'Q06580'},
856319 : {'P17883'},
856320 : {'P32349'},
856321 : {'P07257'},
856322 : {'P0CD91'},
856323 : {'Q06592'},
856324 : {'Q06593'},
856326 : {'Q06595'},
856327 : {'P33335'},
856329 : {'Q06596'},
856330 : {'Q06597'},
856331 : {'Q06598'},
856332 : {'Q08993'},
856333 : {'Q08994'},
856334 : {'Q08995'},
856335 : {'P38721'},
856336 : {'P38722'},
856337 : {'P38723'},
856338 : {'P38724'},
856339 : {'P38725'},
856341 : {'P38727'},
856342 : {'P38728'},
856343 : {'P38729'},
856345 : {'P38731'},
856346 : {'P38732'},
856347 : {'P03874'},
856349 : {'P38734'},
856350 : {'P38735'},
856351 : {'P10080'},
856352 : {'P17076'},
856353 : {'P32190'},
856354 : {'P38736'},
856355 : {'P38737'},
856356 : {'P38738'},
856357 : {'P38739'},
856358 : {'P33400'},
856359 : {'P38740'},
856360 : {'P18888'},
856361 : {'P38741'},
856362 : {'P38742'},
856364 : {'P23179'},
856365 : {'P23180'},
856366 : {'P21957'},
856367 : {'P38700'},
856368 : {'P38744'},
856369 : {'P38745'},
856370 : {'P33413'},
856371 : {'P38701'},
856372 : {'P38746'},
856373 : {'P38747'},
856374 : {'P38709'},
856375 : {'P38689'},
856376 : {'P38748'},
856377 : {'P38749'},
856379 : {'Q6Q5P6'},
856380 : {'P0C2J7'},
856381 : {'P38750'},
856382 : {'Q03497'},
856383 : {'P38751'},
856384 : {'P32902'},
856386 : {'P38703'},
856387 : {'P38753'},
856388 : {'P38754'},
856389 : {'P38755'},
856390 : {'P37299'},
856391 : {'P38702'},
856392 : {'P38756'},
856393 : {'P38757'},
856394 : {'P08539'},
856395 : {'P87108'},
856397 : {'P38704'},
856398 : {'P10614'},
856399 : {'P00447'},
856400 : {'P38758'},
856401 : {'P0C2H6'},
856402 : {'P38705'},
856403 : {'P38759'},
856404 : {'P07347'},
856405 : {'P23624'},
856408 : {'P38760'},
856409 : {'P32793'},
856410 : {'P32792'},
856411 : {'P04076'},
856412 : {'P38707'},
856413 : {'P38708'},
856415 : {'P38711'},
856416 : {'O13529'},
856417 : {'P38763'},
856418 : {'P08964'},
856419 : {'P11914'},
856420 : {'P17423'},
856421 : {'P23968'},
856422 : {'P38764'},
856423 : {'P18962'},
856424 : {'P38765'},
856425 : {'Q00772'},
856426 : {'P38766'},
856427 : {'P38767'},
856428 : {'P38690'},
856429 : {'P38768'},
856430 : {'P38769'},
856431 : {'P38770'},
856432 : {'P07275'},
856433 : {'P38771'},
856434 : {'P38694'},
856435 : {'P48836'},
856436 : {'P38772'},
856437 : {'P34162'},
856438 : {'P16603'},
856439 : {'P38773'},
856440 : {'P38774'},
856441 : {'P38775'},
856442 : {'P38710'},
856443 : {'P37898'},
856444 : {'P38776'},
856445 : {'P38777'},
856447 : {'P38778'},
856448 : {'P00427'},
856449 : {'P38779'},
856450 : {'P0CX80', 'P0CX81'},
856451 : {'P38780'},
856452 : {'P0CX80', 'P0CX81'},
856453 : {'P38781'},
856454 : {'P23285'},
856455 : {'P38782'},
856456 : {'P38783'},
856457 : {'P38784'},
856458 : {'P38785'},
856459 : {'P38786'},
856460 : {'P38787'},
856461 : {'P38788'},
856462 : {'P38712'},
856463 : {'P38789'},
856464 : {'P38790'},
856465 : {'P38791'},
856466 : {'P38792'},
856467 : {'P38793'},
856468 : {'P38794'},
856470 : {'P38604'},
856471 : {'Q6Q547'},
856472 : {'P38713'},
856473 : {'P38795'},
856474 : {'P38796'},
856475 : {'P38797'},
856476 : {'P38798'},
856477 : {'P38799'},
856478 : {'P32361'},
856479 : {'P89114'},
856480 : {'P38800'},
856481 : {'P38801'},
856482 : {'P38691'},
856483 : {'P14693'},
856484 : {'P13574'},
856485 : {'P38803'},
856486 : {'Q00539'},
856487 : {'P38804'},
856488 : {'P38805'},
856489 : {'P28007'},
856490 : {'P38806'},
856491 : {'P38714'},
856492 : {'P32467'},
856494 : {'P32465'},
856496 : {'P38695'},
856497 : {'P38809'},
856498 : {'P38810'},
856499 : {'P38811'},
856500 : {'P38812'},
856501 : {'P38813'},
856502 : {'P38692'},
856503 : {'P38814'},
856504 : {'P38715'},
856505 : {'P38815'},
856506 : {'P38816'},
856507 : {'P32468'},
856508 : {'P38817'},
856509 : {'P38818'},
856510 : {'P38819'},
856511 : {'P38820'},
856512 : {'P38716'},
856513 : {'P38821'},
856514 : {'P38822'},
856515 : {'P38823'},
856516 : {'P38824'},
856517 : {'P38825'},
856518 : {'P38826'},
856519 : {'P38827'},
856520 : {'P25846'},
856521 : {'P38828'},
856522 : {'P38829'},
856523 : {'P22140'},
856524 : {'P38830'},
856527 : {'P38832'},
856528 : {'P38833'},
856529 : {'P18562'},
856530 : {'P38696'},
856532 : {'P38835'},
856533 : {'P38836'},
856534 : {'Q9P305'},
856535 : {'P38837'},
856536 : {'P38838'},
856537 : {'P23291'},
856538 : {'P38839'},
856539 : {'P38840'},
856540 : {'P38841'},
856541 : {'P13130'},
856543 : {'P38842'},
856544 : {'P0CX27', 'P0CX28'},
856545 : {'P38843'},
856546 : {'P38844'},
856547 : {'P40422'},
856548 : {'P06773'},
856551 : {'P38845'},
856552 : {'P32904'},
856553 : {'P32899'},
856554 : {'P32900'},
856555 : {'P38848'},
856556 : {'P38849'},
856557 : {'P17123'},
856558 : {'P17122'},
856559 : {'P38850'},
856560 : {'P38851'},
856561 : {'P38852'},
856562 : {'P33323'},
856563 : {'P38853'},
856564 : {'P38854'},
856565 : {'P38855'},
856566 : {'P38856'},
856567 : {'P38857'},
856568 : {'P38858'},
856569 : {'P38859'},
856570 : {'P33334'},
856571 : {'P16522'},
856572 : {'O13539'},
856573 : {'P38860'},
856574 : {'P38719'},
856575 : {'P38861'},
856576 : {'P38862'},
856577 : {'P38863'},
856578 : {'P38864'},
856579 : {'P00925'},
856580 : {'P38865'},
856581 : {'P38866'},
856582 : {'P38867'},
856583 : {'P38699'},
856584 : {'Q03558'},
856587 : {'P38869'},
856588 : {'P38870'},
856589 : {'P38720'},
856590 : {'P38871'},
856591 : {'P38872'},
856593 : {'P38873'},
856594 : {'P38874'},
856595 : {'P38875'},
856596 : {'P38876'},
856597 : {'P29704'},
856598 : {'P38877'},
856599 : {'P38878'},
856600 : {'P38879'},
856601 : {'P38880'},
856602 : {'P38881'},
856603 : {'P38882'},
856604 : {'P38883'},
856605 : {'P38884'},
856606 : {'P38885'},
856607 : {'P38886'},
856608 : {'P38698'},
856609 : {'P38887'},
856610 : {'P0CX35', 'P0CX36'},
856611 : {'P38888'},
856612 : {'P11792'},
856613 : {'P38889'},
856614 : {'P38890'},
856615 : {'P38891'},
856616 : {'P38892'},
856617 : {'P38893'},
856618 : {'P38894'},
856620 : {'P0CI66', 'P0CI67'},
856621 : {'P0CX18', 'P0CX19'},
856623 : {'O13535'},
856624 : {'P0C2I4'},
856625 : {'P38693'},
856626 : {'P38697'},
856628 : {'P38899'},
856629 : {'P38900'},
856630 : {'Q3E7X8'},
856632 : {'P0CX16', 'P0CX17'},
856633 : {'P39971'},
856634 : {'P39972'},
856636 : {'P39974'},
856637 : {'P39975'},
856638 : {'P39976'},
856639 : {'P0CX08', 'P0CX09'},
856640 : {'P39924'},
856641 : {'P39977'},
856642 : {'P39979'},
856643 : {'P39978'},
856644 : {'P39980'},
856645 : {'P39981'},
856646 : {'P04817'},
856647 : {'P39923'},
856648 : {'P27895'},
856649 : {'P09232'},
856650 : {'Q05676'},
856652 : {'P38628'},
856653 : {'P39983'},
856654 : {'P39984'},
856655 : {'P39985'},
856656 : {'P0CX53', 'P0CX54'},
856657 : {'Q02197'},
856658 : {'P32317'},
856659 : {'P32610'},
856660 : {'P32611'},
856662 : {'P32612'},
856663 : {'P32613'},
856664 : {'P32614'},
856665 : {'P37303'},
856667 : {'P32617'},
856668 : {'P32618'},
856669 : {'P32621'},
856670 : {'P32622'},
856671 : {'P32623'},
856672 : {'P00045'},
856673 : {'P32626'},
856674 : {'P32628'},
856675 : {'P32629'},
856676 : {'P32630'},
856677 : {'P23301'},
856678 : {'P32633'},
856680 : {'P24279'},
856681 : {'P39986'},
856682 : {'P39987'},
856683 : {'P39988'},
856686 : {'P25515'},
856687 : {'P39990'},
856688 : {'P39991'},
856689 : {'P08067'},
856690 : {'P39992'},
856691 : {'P39993'},
856692 : {'P03962'},
856693 : {'O74700'},
856694 : {'P39994'},
856695 : {'P38632'},
856696 : {'P39995'},
856697 : {'P40975'},
856698 : {'P39996'},
856699 : {'P39997'},
856700 : {'P39998'},
856702 : {'P39968'},
856704 : {'P28263'},
856705 : {'P32775'},
856709 : {'P03069'},
856711 : {'P40002'},
856712 : {'P39953'},
856713 : {'P40003'},
856714 : {'P40004'},
856715 : {'P40005'},
856716 : {'P33767'},
856717 : {'P40006'},
856718 : {'P39106'},
856719 : {'P40007'},
856720 : {'P29952'},
856721 : {'P40008'},
856722 : {'P40009'},
856723 : {'P40010'},
856724 : {'P39937'},
856725 : {'P89886'},
856726 : {'P33332'},
856727 : {'P33331'},
856728 : {'P40011'},
856729 : {'P10863'},
856731 : {'P22141'},
856732 : {'P24384'},
856733 : {'P40012'},
856734 : {'P39518'},
856735 : {'P85052'},
856736 : {'P40013'},
856737 : {'P39925'},
856738 : {'P40014'},
856739 : {'P40015'},
856740 : {'P52871'},
856741 : {'P10823'},
856742 : {'P40016'},
856743 : {'P32569'},
856744 : {'P32263'},
856745 : {'P40017'},
856746 : {'P32481'},
856748 : {'P08456'},
856749 : {'Q04739'},
856750 : {'P39943'},
856751 : {'P40018'},
856752 : {'P40019'},
856753 : {'P38555'},
856754 : {'P40020'},
856755 : {'P40021'},
856756 : {'P40022'},
856757 : {'P40023'},
856758 : {'P40024'},
856759 : {'P40025'},
856760 : {'P40026'},
856761 : {'P0CE11'},
856762 : {'P0CD97'},
856763 : {'P18494'},
856764 : {'P40028'},
856765 : {'P40029'},
856766 : {'P39954'},
856767 : {'P40030'},
856768 : {'P29467'},
856769 : {'P39970'},
856770 : {'P40031'},
856771 : {'P39955'},
856772 : {'P39101'},
856774 : {'Q6Q560'},
856775 : {'P40032'},
856776 : {'P40033'},
856777 : {'P40034'},
856778 : {'P10869'},
856779 : {'P40035'},
856780 : {'Q3E7B0'},
856781 : {'P40036'},
856782 : {'P00498'},
856783 : {'P17064'},
856784 : {'P87262'},
856785 : {'P40037'},
856786 : {'Q02771'},
856787 : {'P40038'},
856788 : {'P40039'},
856789 : {'Q12119'},
856790 : {'P39525'},
856791 : {'P40106'},
856792 : {'P40040'},
856793 : {'P40041'},
856794 : {'P28240'},
856796 : {'P40042'},
856797 : {'P40043'},
856799 : {'P34909'},
856800 : {'Q01217'},
856801 : {'P21524'},
856802 : {'P40045'},
856803 : {'P40046'},
856804 : {'P40047'},
856805 : {'P0CX31', 'P0CX32'},
856806 : {'Q3E834'},
856807 : {'P40048'},
856809 : {'P40049'},
856810 : {'P40050'},
856811 : {'P40051'},
856812 : {'P40052'},
856813 : {'P40053'},
856814 : {'P40054'},
856815 : {'P40055'},
856816 : {'P40057'},
856817 : {'P40056'},
856818 : {'P40058'},
856819 : {'P00927'},
856820 : {'P39965'},
856821 : {'P52870'},
856822 : {'P40059'},
856823 : {'P39966'},
856824 : {'P00899'},
856825 : {'P05694'},
856827 : {'P40060'},
856828 : {'P40061'},
856829 : {'P87275'},
856830 : {'P25451'},
856831 : {'P25454'},
856832 : {'P39000'},
856835 : {'P39967'},
856836 : {'P38620'},
856837 : {'P33296'},
856838 : {'P39945'},
856839 : {'P0CX39', 'P0CX40'},
856840 : {'P22202'},
856841 : {'P40063'},
856842 : {'P40064'},
856843 : {'P40065'},
856844 : {'P40066'},
856846 : {'P40069'},
856847 : {'P25302'},
856848 : {'P40070'},
856849 : {'P40071'},
856850 : {'P39969'},
856851 : {'Q01684'},
856852 : {'P40072'},
856853 : {'P0CX41', 'P0CX42'},
856854 : {'P40073'},
856855 : {'P40074'},
856856 : {'P40075'},
856858 : {'P40076'},
856859 : {'P38682'},
856860 : {'P39962'},
856861 : {'P40077'},
856862 : {'P39940'},
856863 : {'P40078'},
856864 : {'P40079'},
856865 : {'P40080'},
856866 : {'P38990'},
856867 : {'P39959'},
856868 : {'P39939'},
856869 : {'P32634'},
856870 : {'P32598'},
856873 : {'P40081'},
856876 : {'P39958'},
856877 : {'P40083'},
856879 : {'Q03612'},
856880 : {'P0CX70', 'P0CX71', 'P0CX72', 'P0CX73'},
856881 : {'P0C270'},
856882 : {'P40084'},
856883 : {'P40085'},
856884 : {'P40086'},
856885 : {'P22134'},
856886 : {'P40087'},
856887 : {'P39944'},
856888 : {'P40088'},
856889 : {'P40089'},
856890 : {'P40090'},
856891 : {'P13393'},
856892 : {'P40091'},
856893 : {'P40092'},
856895 : {'Q01477'},
856896 : {'P10356'},
856897 : {'P10355'},
856898 : {'P39952'},
856899 : {'P39960'},
856900 : {'P40093'},
856901 : {'P40094'},
856903 : {'P40095'},
856904 : {'P40096'},
856906 : {'Q03619'},
856907 : {'P0CX65', 'P0CX66', 'P0CX67', 'P0CX68', 'P0CX69'},
856908 : {'P06843'},
856909 : {'P14736'},
856910 : {'P32656'},
856911 : {'P32657'},
856912 : {'P04147'},
856913 : {'P32660'},
856914 : {'P33306'},
856915 : {'P21269'},
856916 : {'P39956'},
856917 : {'P26364'},
856918 : {'P06839'},
856919 : {'P32639'},
856920 : {'P32641'},
856921 : {'P32642'},
856922 : {'P32643'},
856923 : {'P32644'},
856924 : {'P29311'},
856925 : {'P16387'},
856926 : {'P25453'},
856927 : {'P32645'},
856928 : {'Q3E784'},
856931 : {'P40098'},
856932 : {'P40099'},
856933 : {'P39961'},
856934 : {'P40100'},
856935 : {'P40101'},
856937 : {'P40102'},
856939 : {'P40104'},
856940 : {'P40105'},
1466398 : {'Q3E7Z8'},
1466399 : {'Q3E830'},
1466400 : {'Q3E819'},
1466401 : {'Q3E817'},
1466402 : {'Q8TGU2'},
1466403 : {'Q3E814'},
1466404 : {'Q8TGJ7'},
1466409 : {'Q3E813'},
1466410 : {'P0CY02', 'P0CY03', 'P0CY04', 'P0CY05'},
1466411 : {'P0CY02', 'P0CY03', 'P0CY04', 'P0CY05'},
1466412 : {'P0CE99'},
1466413 : {'P0CF00'},
1466414 : {'P0CY02', 'P0CY03', 'P0CY04', 'P0CY05'},
1466415 : {'P0CY02', 'P0CY03', 'P0CY04', 'P0CY05'},
1466416 : {'Q3E811'},
1466417 : {'Q3E732'},
1466418 : {'Q3E771'},
1466419 : {'Q3E825'},
1466421 : {'Q3E810'},
1466422 : {'Q3E795'},
1466423 : {'Q8TGT1'},
1466424 : {'Q3E742'},
1466426 : {'Q8TGK6'},
1466427 : {'Q3E791'},
1466428 : {'Q3E741'},
1466430 : {'Q8TGV0'},
1466431 : {'Q3E829'},
1466432 : {'Q3E7Z7'},
1466433 : {'Q3E789'},
1466434 : {'Q3E796'},
1466435 : {'Q3E818'},
1466436 : {'Q3E763'},
1466437 : {'P0C1Z1'},
1466438 : {'Q3E6R4'},
1466439 : {'P0C269'},
1466440 : {'Q8TGU8'},
1466441 : {'P0C268'},
1466442 : {'Q3E821'},
1466443 : {'I2HB52'},
1466444 : {'Q3E794'},
1466445 : {'Q6WNK7'},
1466446 : {'Q8TGU6'},
1466447 : {'Q3E820'},
1466448 : {'Q3E778'},
1466449 : {'Q3E755'},
1466450 : {'Q3E781'},
1466451 : {'Q8TGU5'},
1466452 : {'Q8TGK4'},
1466453 : {'Q8TGU1'},
1466454 : {'Q3E750'},
1466455 : {'Q8TGU0'},
1466456 : {'Q3E802'},
1466457 : {'Q45U48'},
1466458 : {'Q3E816'},
1466459 : {'Q8TGT9'},
1466460 : {'Q3E772'},
1466461 : {'Q8TGT8'},
1466462 : {'Q8TGT7'},
1466463 : {'Q3E786'},
1466464 : {'Q3E801'},
1466465 : {'Q3E7A3'},
1466466 : {'Q3E828'},
1466467 : {'Q8TGT3'},
1466468 : {'Q3E737'},
1466469 : {'Q3E827'},
1466470 : {'Q3E743'},
1466471 : {'Q3E775'},
1466472 : {'Q8TGJ1'},
1466473 : {'Q3E7A5'},
1466474 : {'Q3E7Y8'},
1466475 : {'Q3E7Y9'},
1466476 : {'Q3E7Z9'},
1466477 : {'Q8TGS2'},
1466479 : {'Q3E807'},
1466480 : {'Q3E824'},
1466481 : {'Q8TGS1'},
1466482 : {'Q3E735'},
1466483 : {'Q3E832'},
1466484 : {'Q8TGS0'},
1466485 : {'Q3E736'},
1466486 : {'Q3E7Y7'},
1466487 : {'Q3E806'},
1466488 : {'Q3E805'},
1466489 : {'Q3E804'},
1466490 : {'Q8TGJ0'},
1466491 : {'Q45U18'},
1466492 : {'Q3E7Z4'},
1466493 : {'Q3E7Z5'},
1466494 : {'Q3E7Z3'},
1466495 : {'Q3E739'},
1466496 : {'Q8TGT0'},
1466497 : {'Q3E809'},
1466498 : {'Q8TGS9'},
1466499 : {'Q3E760'},
1466500 : {'Q8TGS8'},
1466501 : {'Q3E766'},
1466502 : {'Q8TGS7'},
1466503 : {'Q3E7A9'},
1466504 : {'Q3E7B5'},
1466505 : {'Q8TGS6'},
1466506 : {'Q3E782'},
1466507 : {'Q8TGS5'},
1466508 : {'Q8TGS4'},
1466509 : {'Q3E7Z0'},
1466510 : {'Q3E7Z1'},
1466511 : {'P0C074'},
1466512 : {'Q3E808'},
1466513 : {'P0C271'},
1466514 : {'Q3E767'},
1466515 : {'Q3E7Z2'},
1466516 : {'Q8TGJ3'},
1466517 : {'Q8TGJ2'},
1466518 : {'Q3E823'},
1466519 : {'Q8TGR9'},
1466520 : {'Q3E751'},
1466521 : {'Q3E7B4'},
1466522 : {'Q3E7B3'},
1466523 : {'Q8TGQ7'},
1466524 : {'Q3E758'},
1466525 : {'Q3E7Z6'},
1466526 : {'Q07074'},
1466527 : {'Q8TGT6'},
1466528 : {'Q05451'},
1466529 : {'Q3E746'},
1466530 : {'Q3E815'},
1466531 : {'P0CX90', 'P0CX91'},
1466532 : {'Q8TGT4'},
1466533 : {'Q8TGK1'},
1466534 : {'P0CX92', 'P0CX93'},
1466535 : {'Q8TGK0'},
1466536 : {'Q3E7A2'},
1466537 : {'Q8TGU4'},
1466538 : {'P0C0V2'},
1500485 : {'Q3E765'},
1500486 : {'Q8TGT2'},
1500487 : {'Q3E826'},
1500489 : {'Q3E833'},
2732686 : {'Q3E7B6'},
2746858 : {'Q3E7Y6'},
2746859 : {'Q8J0M4'},
2777172 : {'Q3E762'},
3077354 : {'Q3E787'},
3077355 : {'Q3E744'},
3289591 : {'Q2V2Q1'},
3628034 : {'Q2V2P4'},
3799967 : {'Q2V2Q3'},
3799968 : {'Q2V2Q2'},
3799969 : {'Q2V2Q0'},
3799970 : {'Q2V2P9'},
3799971 : {'Q2V2P8'},
3799972 : {'Q2V2P7'},
3799973 : {'Q2V2P6'},
3799974 : {'Q2V2P5'},
3799975 : {'Q2V2P2'},
3799976 : {'Q2V2P3'},
3799977 : {'Q2V2P1'},
3799978 : {'Q2V2P0'},
4036073 : {'Q03937'},
4594642 : {'P0CX94', 'P0CX95', 'P0CX96', 'P0CX97', 'P0CX98'},
5142379 : {'A5Z2X5'},
5814844 : {'P0C5N3'},
5848745 : {'P0C5R9'},
8656591 : {'Q8TGN9'},
13393611 : {'P0C5L6'},
13393612 : {'Q8TGN3'},
13393613 : {'Q8TGN5'},
13393614 : {'Q8TGU7'},
20098144 : {'A0A023PZB3'},
23547378 : {'A0A0B7P221'},
24573116 : {'P61829'},
24573117 : {'P00856'},
24573142 : {'P00420'},
}
# Copyright (C) 2014-2019 DV Klopfenstein. All rights reserved
| mit | 5,752,229,845,911,966,000 | 22.325017 | 84 | 0.524255 | false |
samupl/website | apps/blog/models.py | 1 | 2366 | from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.template.defaultfilters import slugify
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL)
date = models.DateTimeField(verbose_name='Date', auto_now_add=True)
title = models.CharField(verbose_name='Title', max_length=1024)
slug = models.SlugField(unique=True)
content = models.TextField(verbose_name='Content')
comment_count = models.PositiveIntegerField(verbose_name='Comment count', default=0, editable=False)
@models.permalink
def get_absolute_url(self):
return 'blog:view', [str(self.id), str(self.slug)]
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super().save(*args, **kwargs)
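    # e.g. Post(title="Hello World", ...).save() would fill in the slug "hello-world"
    # when no slug was provided explicitly (other required fields omitted here).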
def __str__(self):
return '{title} ({date})'.format(
title=self.title,
date=self.date
)
class Label(models.Model):
post = models.ForeignKey(Post)
label = models.CharField(max_length=90)
def __str__(self):
return '{label} ({post})'.format(
label=self.label,
post=self.post
)
class Meta:
unique_together = ['post', 'label']
class Comment(models.Model):
post = models.ForeignKey(Post)
date = models.DateTimeField(verbose_name='Date', auto_now_add=True)
username = models.CharField(verbose_name='Username', max_length=256)
email = models.EmailField(verbose_name='E-mail address')
content = models.TextField(max_length=4096)
ip = models.CharField(max_length=4096)
host = models.CharField(max_length=4096)
ua = models.CharField(null=True, blank=True, max_length=4096)
ref = models.CharField(null=True, blank=True, max_length=4096)
def __str__(self):
return '{username} ({email}, {ip}, {date}, {post})'.format(
username=self.username,
email=self.email,
ip=self.ip,
date=self.date,
post=self.post
)
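# Keep the denormalized Post.comment_count in sync whenever a comment is created or deleted.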
@receiver(post_delete, sender=Comment)
@receiver(post_save, sender=Comment)
def calculate_comments_count(sender, instance, **kwargs):
entry = instance.post
entry.comment_count = Comment.objects.filter(post=entry).count()
entry.save()
| mit | -5,252,156,682,347,990,000 | 32.323944 | 104 | 0.652578 | false |
quantifiedcode-bot/invenio-search | setup.py | 1 | 4305 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio module for information retrieval."""
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
requirements = [
'Flask>=0.10.1',
'six>=1.7.2',
'invenio-access>=0.1.0',
'invenio-accounts>=0.1.2',
'invenio-base>=0.1.0',
'invenio-formatter>=0.2.1',
'invenio-knowledge>=0.1.0',
'invenio-query-parser>=0.2',
'invenio-upgrader>=0.1.0',
]
test_requirements = [
'unittest2>=1.1.0',
'Flask_Testing>=0.4.1',
'pytest>=2.7.0',
'pytest-cov>=1.8.0',
'pytest-pep8>=1.0.6',
'coverage>=3.7.1',
]
class PyTest(TestCommand):
"""PyTest Test."""
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
"""Init pytest."""
TestCommand.initialize_options(self)
self.pytest_args = []
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
config = ConfigParser()
config.read('pytest.ini')
self.pytest_args = config.get('pytest', 'addopts').split(' ')
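        # pytest.ini is expected to provide the default addopts, e.g. (hypothetical contents):
        #   [pytest]
        #   addopts = --pep8 --cov=invenio_search tests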
def finalize_options(self):
"""Finalize pytest."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Run tests."""
# import here, cause outside the eggs aren't loaded
import pytest
import _pytest.config
pm = _pytest.config.get_plugin_manager()
pm.consider_setuptools_entrypoints()
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_search', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-search',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio TODO',
license='GPLv2',
author='CERN',
author_email='[email protected]',
url='https://github.com/inveniosoftware/invenio-search',
packages=[
'invenio_search',
],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=requirements,
extras_require={
'docs': [
'Sphinx>=1.3',
'sphinx_rtd_theme>=0.1.7'
],
'tests': test_requirements
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
'Development Status :: 1 - Planning',
],
tests_require=test_requirements,
cmdclass={'test': PyTest},
)
| gpl-2.0 | 5,711,932,758,240,129,000 | 29.75 | 76 | 0.628339 | false |
svebk/DeepSentiBank_memex | workflows/images-incremental-update/images-incremental-update.py | 1 | 58831 | import os
import json
import time
import calendar
import datetime
import dateutil.parser
import sys
print(sys.version)
import subprocess
dev = False
if dev:
dev_release_suffix = "_dev"
base_incremental_path = '/user/skaraman/data/images_incremental_update_dev/'
else:
dev_release_suffix = "_release"
base_incremental_path = '/user/worker/dig2/incremental/'
from optparse import OptionParser
from pyspark import SparkContext, SparkConf, StorageLevel
from elastic_manager import ES
from hbase_manager import HbaseManager
# deprecated, now uptonow option
#query_ts_minmax = True # Otherwise get everything after es_ts_start
day_gap = 86400000 # One day, in milliseconds
ts_gap = day_gap
time_sleep_update_out = 10
#ts_gap = 10000000
#ts_gap = 10000
# default settings
#fields_cdr = ["obj_stored_url", "obj_parent", "obj_original_url", "timestamp", "crawl_data.image_id", "crawl_data.memex_ht_id"]
max_ts = 9999999999999
fields_cdr = ["obj_stored_url", "obj_parent"]
fields_list = [("info","all_cdr_ids"), ("info","s3_url"), ("info","all_parent_ids"), ("info","image_discarded"), ("info","cu_feat_id")]
##-- General RDD I/O
##------------------
def get_list_value(json_x,field_tuple):
return [x["value"] for x in json_x if x["columnFamily"]==field_tuple[0] and x["qualifier"]==field_tuple[1]]
def check_hdfs_file(hdfs_file_path):
proc = subprocess.Popen(["hdfs", "dfs", "-ls", hdfs_file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if "Filesystem closed" in err:
print("[check_hdfs_file: WARNING] Beware got error '{}' when checking for file: {}.".format(err, hdfs_file_path))
sys.stdout.flush()
print "[check_hdfs_file] out: {}, err: {}".format(out, err)
return out, err
def hdfs_file_exist(hdfs_file_path):
out, err = check_hdfs_file(hdfs_file_path)
# too restrictive as even log4j error would be interpreted as non existing file
#hdfs_file_exist = "_SUCCESS" in out and not "_temporary" in out and not err
hdfs_file_exist = "_SUCCESS" in out
return hdfs_file_exist
def hdfs_file_failed(hdfs_file_path):
out, err = check_hdfs_file(hdfs_file_path)
hdfs_file_failed = "_temporary" in out
return hdfs_file_failed
def load_rdd_json(basepath_save, rdd_name):
rdd_path = basepath_save + "/" + rdd_name
rdd = None
try:
if hdfs_file_exist(rdd_path):
print("[load_rdd_json] trying to load rdd from {}.".format(rdd_path))
rdd = sc.sequenceFile(rdd_path).mapValues(json.loads)
except Exception as inst:
print("[load_rdd_json: caught error] could not load rdd from {}. Error was {}.".format(rdd_path, inst))
return rdd
def save_rdd_json(basepath_save, rdd_name, rdd, incr_update_id, hbase_man_update_out):
rdd_path = basepath_save + "/" + rdd_name
if not rdd.isEmpty():
try:
if not hdfs_file_exist(rdd_path):
print("[save_rdd_json] saving rdd to {}.".format(rdd_path))
rdd.mapValues(json.dumps).saveAsSequenceFile(rdd_path)
else:
print("[save_rdd_json] skipped saving rdd to {}. File already exists.".format(rdd_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, rdd_path, rdd_name+"_path")
except Exception as inst:
print("[save_rdd_json: caught error] could not save rdd at {}, error was {}.".format(rdd_path, inst))
else:
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
# is this inducing respawn when called twice within short timespan?
# should we reinstantiate a different hbase_man_update_out every time?
def save_info_incremental_update(hbase_man_update_out, incr_update_id, info_value, info_name):
print("[save_info_incremental_update] saving update info {}: {}".format(info_name, info_value))
incr_update_infos_list = []
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", info_name, str(info_value)]))
incr_update_infos_rdd = sc.parallelize(incr_update_infos_list)
hbase_man_update_out.rdd2hbase(incr_update_infos_rdd)
##------------------
##-- END General RDD I/O
##-- S3 URL functions
##-------------------
def clean_up_s3url_sha1(data):
try:
s3url = unicode(data[0]).strip()
json_x = [json.loads(x) for x in data[1].split("\n")]
sha1 = get_list_value(json_x,("info","sha1"))[0].strip()
return [(s3url, sha1)]
except:
print("[clean_up_s3url_sha1] failed, data was: {}".format(data))
return []
def get_SHA1_from_URL(URL):
import image_dl
sha1hash = image_dl.get_SHA1_from_URL_StringIO(URL,1) # 1 is verbose level
return sha1hash
def get_row_sha1(URL_S3,verbose=False):
row_sha1 = None
if type(URL_S3) == unicode and URL_S3 != u'None' and URL_S3.startswith('https://s3'):
row_sha1 = get_SHA1_from_URL(URL_S3)
if row_sha1 and verbose:
print "Got new SHA1 {} from_url {}.".format(row_sha1,URL_S3)
return row_sha1
def check_get_sha1_s3url(data):
URL_S3 = data[0]
row_sha1 = get_row_sha1(unicode(URL_S3),0)
if row_sha1:
return [(URL_S3, (list(data[1][0]), row_sha1))]
return []
def get_s3url_sha1(data):
sha1 = data[0]
json_x = data[1]
try:
s3url_list = get_list_value(json_x,("info","obj_stored_url"))
sha1_list = get_list_value(json_x,("info","sha1"))
if s3url_list and sha1_list:
s3url = s3url_list[0].strip()
sha1 = sha1_list[0].strip()
if not s3url.startswith('https://s3'):
raise ValueError('s3url is not stored in S3.')
else:
if not sha1_list:
raise ValueError('sha1 is not computed.')
if not s3url_list:
raise ValueError('s3url is absent.')
except Exception as inst:
print "[get_s3url_sha1: error] Could not get sha1 or s3url for row {}. {}".format(key, inst)
return []
if sha1 and s3url:
return [(s3url, [s3url, "info", "sha1", sha1.upper()])]
return []
def reduce_s3url_infos(a,b):
a.extend(b)
return a
def reduce_s3_keep_one_sha1(a,b):
if a != b:
raise ValueError("[reduce_s3_keep_one_sha1: error] one s3url has two differnet sha1 values {} and {}.".format(a, b))
return a
def hbase_out_s3url_sha1(data):
s3_url = data[0]
sha1 = data[1]
if sha1 and s3_url:
return [(s3_url, [s3_url, "info", "sha1", sha1.upper()])]
return []
def to_s3_url_key_dict_list(data):
doc_id = data[0]
v = data[1]
tup_list = []
if "info:obj_stored_url" in v:
s3url = v["info:obj_stored_url"]
if s3url.startswith('https://s3'):
v["info:doc_id"] = doc_id
tup_list = [(s3url, [v])]
return tup_list
def s3url_dict_list_to_cdr_id_wsha1(data):
if len(data[1]) != 2 or data[1][1] is None or data[1][1] == 'None' or data[1][1] == u'None':
print("[s3url_dict_list_to_cdr_id_wsha1] incorrect data: {}".format(data))
return []
s3_url = data[0]
list_v = data[1][0]
sha1 = data[1][1]
tup_list = []
for v in list_v:
if sha1:
doc_id = v["info:doc_id"]
if type(sha1) == list and len(sha1)==1:
v["info:sha1"] = sha1[0]
else:
v["info:sha1"] = sha1
tup_list.append((doc_id, v))
return tup_list
def dump_s3url_info_list_dict(x):
v = dict()
v["left"] = dict()
i = 0
#for w in list(x[0]):
for w in x[0]:
if w:
v["left"][str(i)] = json.dumps(w)
i += 1
if x[1]:
v["right"] = x[1]
return json.dumps(v)
def load_s3url_info_list_dict(x):
v = json.loads(x)
x0 = []
x1 = []
for w in v["left"]:
x0.append(json.loads(v["left"][w]))
#x0.append(json.loads(w))
if "right" in v:
x1 = v["right"]
return (x0, x1)
def load_s3url_infos_rdd_join(s3url_infos_rdd_join_path):
s3url_infos_rdd_join = None
try:
if hdfs_file_exist(s3url_infos_rdd_join_path):
s3url_infos_rdd_join = sc.sequenceFile(s3url_infos_rdd_join_path).mapValues(load_s3url_info_list_dict)
print("[load_s3url_infos_rdd_join: info] first samples of s3url_infos_rdd_join looks like: {}".format(s3url_infos_rdd_join.take(5)))
except Exception as inst:
print("[load_s3url_infos_rdd_join: caught error] Could not load rdd at {}. Error was {}.".format(s3url_infos_rdd_join_path, inst))
return s3url_infos_rdd_join
def save_s3url_infos_rdd_join(s3url_infos_rdd_join, hbase_man_update_out, incr_update_id, s3url_infos_rdd_join_path, s3url_infos_rdd_join_path_str):
try:
if not hdfs_file_exist(s3url_infos_rdd_join_path):
print("[save_s3url_infos_rdd_join: info] saving 's3url_infos_rdd_join' to {}.".format(s3url_infos_rdd_join_path))
s3url_infos_rdd_join_tosave = s3url_infos_rdd_join.mapValues(dump_s3url_info_list_dict)
print("[save_s3url_infos_rdd_join: info] first samples of s3url_infos_rdd_join_tosave looks like: {}".format(s3url_infos_rdd_join_tosave.take(5)))
s3url_infos_rdd_join_tosave.saveAsSequenceFile(s3url_infos_rdd_join_path)
else:
print("[save_s3url_infos_rdd_join] skipped saving rdd to {}. File already exists.".format(s3url_infos_rdd_join_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, s3url_infos_rdd_join_path, s3url_infos_rdd_join_path_str)
except Exception as inst:
print("[save_s3url_infos_rdd_join: caught error] could not save rdd at {}, error was {}.".format(s3url_infos_rdd_join_path, inst))
def get_s3url_infos_rdd_join(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time):
rdd_name = "s3url_infos_rdd_join"
s3url_infos_rdd_join_path = basepath_save + "/" + rdd_name
# always try to load from disk
s3url_infos_rdd_join = load_s3url_infos_rdd_join(s3url_infos_rdd_join_path)
if s3url_infos_rdd_join is not None:
print("[get_s3url_infos_rdd_join: info] loaded rdd from {}.".format(s3url_infos_rdd_join_path))
return s3url_infos_rdd_join
# get dependency cdr_ids_infos_rdd
cdr_ids_infos_rdd = get_cdr_ids_infos_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time)
if cdr_ids_infos_rdd is None:
print("[get_s3url_infos_rdd_join] cdr_ids_infos_rdd is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
print("[get_s3url_infos_rdd_join: info] computing rdd s3url_infos_rdd_join.")
    # there could be duplicate cdr_ids near index boundaries, or corrections might have been applied...
cdr_ids_infos_rdd_red = cdr_ids_infos_rdd.reduceByKey(reduce_cdrid_infos)
# invert cdr_ids_infos_rdd (k,v) into s3url_infos_rdd (v[s3_url],[v,v['cdr_id']=k])
s3url_infos_rdd = cdr_ids_infos_rdd_red.flatMap(to_s3_url_key_dict_list)
s3url_infos_rdd_red = s3url_infos_rdd.reduceByKey(reduce_s3url_infos)
# get some stats
s3url_infos_rdd_count = s3url_infos_rdd.count()
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_count is: {}".format(s3url_infos_rdd_count))
s3url_infos_rdd_red_count = s3url_infos_rdd_red.count()
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_red_count is: {}".format(s3url_infos_rdd_red_count))
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_red first samples looks like: {}".format(s3url_infos_rdd_red.take(10)))
if c_options.join_s3url:
try:
# try to reload from disk
s3url_sha1_rdd = sc.sequenceFile(basepath_save + "/s3url_sha1_rdd")
except Exception as inst:
# read s3url_sha1 table into s3url_sha1 to get sha1 here without downloading images
print("[get_s3url_infos_rdd_join] starting to read from s3url_sha1 HBase table.")
s3url_sha1_rdd = hbase_man_s3url_sha1_in.read_hbase_table().flatMap(clean_up_s3url_sha1)
# never save that anymore, too big.
# try:
# s3url_sha1_rdd.saveAsSequenceFile(basepath_save + "/s3url_sha1_rdd")
# except Exception as inst:
# pass
s3url_sha1_rdd_count = s3url_sha1_rdd.count()
print("[get_s3url_infos_rdd_join: info] s3url_sha1_rdd_count is: {}".format(s3url_sha1_rdd_count))
s3url_sha1_rdd_partitioned = s3url_sha1_rdd.partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
s3url_infos_rdd_red_partitioned = s3url_infos_rdd_red.partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
#print("[get_s3url_infos_rdd_join] start running 's3url_infos_rdd_red.cogroup(s3url_sha1_rdd)'.")
#s3url_infos_rdd_join = s3url_infos_rdd_red_partitioned.cogroup(s3url_sha1_rdd_partitioned)
print("[get_s3url_infos_rdd_join] start running 's3url_infos_rdd_red.leftOuterJoin(s3url_sha1_rdd)'.")
s3url_infos_rdd_join = s3url_infos_rdd_red_partitioned.leftOuterJoin(s3url_sha1_rdd_partitioned)
s3url_infos_rdd_join_count = s3url_infos_rdd_join.count()
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_join_count is: {}".format(s3url_infos_rdd_join_count))
else:
print("[get_s3url_infos_rdd_join: info] skipping join with s3url_sha1 table as requested from options.")
        # Fake a join so everything after runs the same way.
        # A real join would have a SHA1 as the right-side value for already existing s3 URLs.
s3url_infos_rdd_join = s3url_infos_rdd_red.mapValues(lambda x: (x, None))
# Save rdd
if c_options.save_inter_rdd:
save_s3url_infos_rdd_join(s3url_infos_rdd_join, hbase_man_update_out, incr_update_id, s3url_infos_rdd_join_path, "s3url_infos_rdd_join_path")
return s3url_infos_rdd_join
def save_new_s3_url(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
## save out newly computed s3url
cdr_ids_infos_rdd_new_sha1 = get_cdr_ids_infos_rdd_new_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if cdr_ids_infos_rdd_new_sha1 is None:
print("[save_new_s3_url] cdr_ids_infos_rdd_new_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, "new_s3url_sha1_rdd_count")
return
# invert cdr_ids_infos_rdd_new_sha1 to (s3url, sha1) and apply reduceByKey() selecting any sha1
new_s3url_sha1_rdd = cdr_ids_infos_rdd_new_sha1.flatMap(cdrid_key_to_s3url_key_sha1_val)
out_new_s3url_sha1_rdd = new_s3url_sha1_rdd.reduceByKey(reduce_s3_keep_one_sha1).flatMap(hbase_out_s3url_sha1)
print("[save_new_s3_url: info] out_new_s3url_sha1_rdd first samples look like: {}".format(out_new_s3url_sha1_rdd.take(5)))
print("[save_new_s3_url] saving 'out_new_s3url_sha1_rdd' to HBase.")
hbase_man_s3url_sha1_out.rdd2hbase(out_new_s3url_sha1_rdd)
## save new images update infos
new_s3url_sha1_rdd_count = out_new_s3url_sha1_rdd.count()
print("[save_new_s3_url] new_s3url_sha1_rdd_count count: {}".format(new_s3url_sha1_rdd_count))
save_info_incremental_update(hbase_man_update_out, incr_update_id, new_s3url_sha1_rdd_count, "new_s3url_sha1_rdd_count")
##-------------------
##-- END S3 URL functions
## SHA1 and CDR ids related functions
def expand_info(data):
key = data[0]
json_x = data[1]
out = []
for field in json_x:
fs = field.split(':')
out.append((key, [key, fs[0], fs[1], json_x[field]]))
return out
def create_images_tuple(data):
doc_id = data[0]
json_x = json.loads(data[1])
ts = json_x["_metadata"]["_timestamp"]
key = str(max_ts-ts)+"_"+doc_id
tup_list=[ (key, [key, "info", "doc_id", doc_id])]
for field in fields_cdr:
try:
field_value = json_x[field][0]
if field.endswith("url"):
str_field_value = unicode(field_value)
else:
str_field_value = str(field_value)
tup_list.append( (key, [key, "info", field, str_field_value]) )
except Exception as inst:
pass
return tup_list
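# Re-key a (cdr_id, info dict) record by the image SHA1, keeping the cdr_id, its
# parent id and the s3 URL as single-element lists so they can be unioned later.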
def cdrid_key_to_sha1_key(data):
cdr_id = data[0]
json_x = data[1]
sha1 = None
obj_stored_url = None
obj_parent = None
try:
sha1_val = json_x["info:sha1"]
if type(sha1_val)==list and len(sha1_val)==1:
sha1 = sha1_val[0].strip()
else:
sha1 = sha1_val.strip()
obj_stored_url = unicode(json_x["info:obj_stored_url"].strip())
obj_parent = json_x["info:obj_parent"].strip()
except Exception as inst2:
print("[cdrid_key_to_sha1_key] could not read sha1, obj_stored_url or obj_parent for cdr_id {}".format(cdr_id))
pass
if cdr_id and sha1 and obj_stored_url and obj_parent:
return [(sha1, {"info:all_cdr_ids": [cdr_id], "info:s3_url": [obj_stored_url], "info:all_parent_ids": [obj_parent]})]
return []
def cdrid_key_to_s3url_key_sha1_val(data):
json_x = data[1]
sha1 = None
obj_stored_url = None
try:
sha1_val = json_x["info:sha1"]
if type(sha1_val)==list and len(sha1_val)==1:
sha1 = sha1_val[0].strip()
else:
sha1 = sha1_val.strip()
obj_stored_url = unicode(json_x["info:obj_stored_url"].strip())
except Exception as inst2:
pass
if obj_stored_url and sha1:
return [(obj_stored_url, sha1)]
return []
def sha1_key_json(data):
sha1 = data[0]
json_x = [json.loads(x) for x in data[1].split("\n")]
v = dict()
for field in fields_list:
try:
if field[1]!='s3_url':
v[':'.join(field)] = list(set([x for x in get_list_value(json_x,field)[0].strip().split(',')]))
else:
v[':'.join(field)] = [unicode(get_list_value(json_x,field)[0].strip())]
except: # field not in row
pass
return [(sha1, v)]
def reduce_cdrid_infos(a,b):
''' If we have two samples with the same cdr_id we want to keep the newest
that may be a correction of the older one.
'''
c = dict()
if a["info:insert_ts"] > b["info:insert_ts"]:
c = a
else:
c = b
return c
def safe_reduce_infos(a, b, c, field):
try:
c[field] = list(set(a[field]+b[field]))
except Exception as inst:
try:
c[field] = a[field]
print("[safe_reduce_infos: error] key error for '{}' for b".format(field))
except Exception as inst2:
try:
c[field] = b[field]
print("[safe_reduce_infos: error] key error for '{}' for a".format(field))
except Exception as inst3:
c[field] = []
print("[safe_reduce_infos: error] key error for '{}' for both a and b".format(field))
return c
def safe_assign(a, c, field, fallback):
if field in a:
c[field] = a[field]
else:
print("[safe_assign: error] we have no {}.".format(field))
c[field] = fallback
return c
def test_info_s3_url(dict_img):
return "info:s3_url" in dict_img and dict_img["info:s3_url"] and dict_img["info:s3_url"][0]!=u'None' and dict_img["info:s3_url"][0].startswith('https://s3')
def reduce_sha1_infos_discarding(a,b):
c = dict()
if b: # sha1 already existed
if "info:image_discarded" in a or "info:image_discarded" in b:
c["info:all_cdr_ids"] = []
c["info:all_parent_ids"] = []
c["info:image_discarded"] = 'discarded because has more than {} cdr_ids'.format(max_images_reduce)
else:
# KeyError: 'info:all_cdr_ids'. How could an image not have this field?
c = safe_reduce_infos(a, b, c, "info:all_cdr_ids")
c = safe_reduce_infos(a, b, c, "info:all_parent_ids")
#if "info:s3_url" in a and a["info:s3_url"] and a["info:s3_url"].startswith('https://s3') and a["info:s3_url"][0]!=u'None':
if test_info_s3_url(a):
c["info:s3_url"] = a["info:s3_url"]
else:
if test_info_s3_url(b):
c["info:s3_url"] = b["info:s3_url"]
else:
print("[reduce_sha1_infos_discarding: error] both a and b have no s3 url.")
c["info:s3_url"] = [None]
# need to keep info:cu_feat_id if it exists
if "info:cu_feat_id" in b:
c["info:cu_feat_id"] = b["info:cu_feat_id"]
else: # brand new image
c = safe_assign(a, c, "info:s3_url", [None])
c = safe_assign(a, c, "info:all_cdr_ids", [])
c = safe_assign(a, c, "info:all_parent_ids", [])
# should discard if bigger than max(max_images_hbase, max_images_dig)...
if len(c["info:all_cdr_ids"]) > max_images_reduce or len(c["info:all_parent_ids"]) > max_images_reduce:
print("Discarding image with URL: {}".format(c["info:s3_url"][0]))
c["info:all_cdr_ids"] = []
c["info:all_parent_ids"] = []
c["info:image_discarded"] = 'discarded because has more than {} cdr_ids'.format(max_images_reduce)
return c
def split_sha1_kv_images_discarded(x):
# this prepares data to be saved in HBase
tmp_fields_list = [("info","all_cdr_ids"), ("info","s3_url"), ("info","all_parent_ids")]
out = []
if "info:image_discarded" in x[1] or len(x[1]["info:all_cdr_ids"]) > max_images_hbase or len(x[1]["info:all_parent_ids"]) > max_images_hbase:
if "info:image_discarded" not in x[1]:
x[1]["info:image_discarded"] = 'discarded because has more than {} cdr_ids'.format(max_images_hbase)
out.append((x[0], [x[0], "info", "image_discarded", x[1]["info:image_discarded"]]))
str_s3url_value = None
s3url_value = x[1]["info:s3_url"][0]
str_s3url_value = unicode(s3url_value)
out.append((x[0], [x[0], "info", "s3_url", str_s3url_value]))
out.append((x[0], [x[0], "info", "all_cdr_ids", x[1]["info:image_discarded"]]))
out.append((x[0], [x[0], "info", "all_parent_ids", x[1]["info:image_discarded"]]))
else:
for field in tmp_fields_list:
if field[1]=="s3_url":
out.append((x[0], [x[0], field[0], field[1], unicode(x[1][field[0]+":"+field[1]][0])]))
else:
out.append((x[0], [x[0], field[0], field[1], ','.join(x[1][field[0]+":"+field[1]])]))
return out
def flatten_leftjoin(x):
out = []
# at this point value is a tuple of two lists with a single or empty dictionary
c = reduce_sha1_infos_discarding(x[1][0],x[1][1])
out.append((x[0], c))
return out
def to_cdr_id_dict(data):
doc_id = data[0]
v = dict()
json_x = json.loads(data[1])
insert_ts = str(json_x["_metadata"]["_timestamp"])
v["info:insert_ts"] = insert_ts
v["info:doc_id"] = doc_id
del json_x["_metadata"]
for field in json_x:
try:
v["info:"+field] = str(json_x[field][0])
except Exception as inst:
print("[to_cdr_id_dict: error] {} for doc: {}. Assuming it is an encoding issue.".format(inst, doc_id))
try:
v["info:"+field] = json_x[field][0].encode('utf-8')
except Exception as inst2:
print("[to_cdr_id_dict: error] failed again ({}) for doc: {}.".format(inst2, doc_id))
pass
tup_list = [(doc_id, v)]
#print("[to_cdr_id_dict] {}".format(tup_list))
return tup_list
def get_existing_joined_sha1(data):
if len(data[1]) == 2 and data[1][1] and data[1][1] is not None and data[1][1] != 'None' and data[1][1] != u'None':
return True
return False
##-- New images for features computation functions
##---------------
def build_batch_out(batch_update, incr_update_id, batch_id):
update_id = "index_update_"+incr_update_id+'_'+str(batch_id)
list_key = []
for x in batch_update:
list_key.append(x)
return [(update_id, [update_id, "info", "list_sha1s", ','.join(list_key)])]
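# Illustrative output of build_batch_out (hypothetical ids): for
# incr_update_id "2017-03-01" and batch_id 0, a batch of sha1s produces
#   ("index_update_2017-03-01_0",
#    ["index_update_2017-03-01_0", "info", "list_sha1s", "sha1a,sha1b,..."])
# i.e. the (row key, [row key, column family, qualifier, value]) layout used
# throughout this script when writing rows with rdd2hbase.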
def save_new_sha1s_for_index_update_batchwrite(new_sha1s_rdd, hbase_man_update_out, batch_update_size, incr_update_id, total_batches, nb_batchwrite=32):
start_save_time = time.time()
# use toLocalIterator if new_sha1s_rdd would be really big and won't fit in the driver's memory
#iterator = new_sha1s_rdd.toLocalIterator()
iterator = new_sha1s_rdd.collect()
batch_update = []
batch_out = []
batch_id = 0
push_batches = False
for x in iterator:
batch_update.append(x)
if len(batch_update)==batch_update_size:
if batch_id > 0 and batch_id % nb_batchwrite == 0:
push_batches = True
try:
print("[save_new_sha1s_for_index_update_batchwrite] preparing batch {}/{} starting with: {}".format(batch_id+1, total_batches, batch_update[:10]))
batch_out.extend(build_batch_out(batch_update, incr_update_id, batch_id))
batch_id += 1
except Exception as inst:
print("[save_new_sha1s_for_index_update_batchwrite] Could not create/save batch {}. Error was: {}".format(batch_id, inst))
batch_update = []
if push_batches:
batch_out_rdd = sc.parallelize(batch_out)
print("[save_new_sha1s_for_index_update_batchwrite] saving {} batches of {} new images to HBase.".format(len(batch_out), batch_update_size))
hbase_man_update_out.rdd2hbase(batch_out_rdd)
batch_out = []
push_batches = False
# last batch
if batch_update:
try:
print("[save_new_sha1s_for_index_update_batchwrite] will prepare and save last batch {}/{} starting with: {}".format(batch_id+1, total_batches, batch_update[:10]))
batch_out.extend(build_batch_out(batch_update, incr_update_id, batch_id))
batch_out_rdd = sc.parallelize(batch_out)
print("[save_new_sha1s_for_index_update_batchwrite] saving {} batches of {} new images to HBase.".format(len(batch_out), batch_update_size))
hbase_man_update_out.rdd2hbase(batch_out_rdd)
#batch_rdd.unpersist()
except Exception as inst:
print("[save_new_sha1s_for_index_update_batchwrite] Could not create/save batch {}. Error was: {}".format(batch_id, inst))
print("[save_new_sha1s_for_index_update_batchwrite] DONE in {}s".format(time.time() - start_save_time))
def save_new_images_for_index(basepath_save, out_rdd, hbase_man_update_out, incr_update_id, batch_update_size, c_options, new_images_to_index_str):
# save images without cu_feat_id that have not been discarded for indexing
new_images_to_index = out_rdd.filter(lambda x: "info:image_discarded" not in x[1] and "info:cu_feat_id" not in x[1])
new_images_to_index_count = new_images_to_index.count()
print("[save_new_images_for_index] {}_count count: {}".format(new_images_to_index_str, new_images_to_index_count))
save_info_incremental_update(hbase_man_update_out, incr_update_id, new_images_to_index_count, new_images_to_index_str+"_count")
import numpy as np
total_batches = int(np.ceil(np.float32(new_images_to_index_count)/batch_update_size))
# partition to the number of batches?
# 'save_new_sha1s_for_index_update' uses toLocalIterator()
new_images_to_index_partitioned = new_images_to_index.partitionBy(total_batches)
# save to HDFS too
if c_options.save_inter_rdd:
try:
new_images_to_index_out_path = basepath_save + "/" + new_images_to_index_str
if not hdfs_file_exist(new_images_to_index_out_path):
print("[save_new_images_for_index] saving rdd to {}.".format(new_images_to_index_out_path))
new_images_to_index_partitioned.keys().saveAsTextFile(new_images_to_index_out_path)
else:
print("[save_new_images_for_index] skipped saving rdd to {}. File already exists.".format(new_images_to_index_out_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, new_images_to_index_out_path, new_images_to_index_str+"_path")
except Exception as inst:
print("[save_new_images_for_index] could not save rdd 'new_images_to_index' at {}, error was {}.".format(new_images_to_index_out_path, inst))
# save by batch in HBase to let the API know it needs to index these images
print("[save_new_images_for_index] start saving by batches of {} new images.".format(batch_update_size))
# crashes in 'save_new_sha1s_for_index_update'?
#save_new_sha1s_for_index_update(new_images_to_index_partitioned.keys(), hbase_man_update_out, batch_update_size, incr_update_id, total_batches)
save_new_sha1s_for_index_update_batchwrite(new_images_to_index_partitioned.keys(), hbase_man_update_out, batch_update_size, incr_update_id, total_batches)
##---------------
##-- END New images for features computation functions
##-- Amandeep RDDs I/O
##---------------
def out_to_amandeep_dict_str(x):
# this is called with map()
sha1 = x[0]
# keys should be: "image_sha1", "all_parent_ids", "s3_url", "all_cdr_ids"
# keep "cu_feat_id" to be able to push images to be indexed
out_dict = dict()
out_dict["image_sha1"] = sha1
for field in ["all_parent_ids", "s3_url", "all_cdr_ids", "cu_feat_id"]:
if "info:"+field in x[1]:
out_dict[field] = x[1]["info:"+field]
return (sha1, json.dumps(out_dict))
def amandeep_dict_str_to_out(x):
# this is called with mapValues()
# keys should be: "image_sha1", "all_parent_ids", "s3_url", "all_cdr_ids"
# keep "cu_feat_id" to be able to push images to be indexed
tmp_dict = json.loads(x)
out_dict = dict()
#sha1 = tmp_dict["image_sha1"]
for field in ["all_parent_ids", "s3_url", "all_cdr_ids", "cu_feat_id"]:
if field in tmp_dict:
out_dict["info:"+field] = tmp_dict[field]
return out_dict
def filter_out_rdd(x):
return "info:image_discarded" not in x[1] and len(x[1]["info:all_cdr_ids"]) <= max_images_dig and len(x[1]["info:all_parent_ids"]) <= max_images_dig
##-- END Amandeep RDDs I/O
##---------------
##-- Incremental update get RDDs main functions
##---------------
def get_cdr_ids_infos_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time):
rdd_name = "cdr_ids_infos_rdd"
# always try to load from disk
cdr_ids_infos_rdd = load_rdd_json(basepath_save, rdd_name)
if cdr_ids_infos_rdd is not None:
print("[get_cdr_ids_infos_rdd: info] cdr_ids_infos_rdd loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return cdr_ids_infos_rdd
if not c_options.uptonow and es_ts_end is not None:
query = "{\"fields\": [\""+"\", \"".join(fields_cdr)+"\"], \"query\": {\"filtered\": {\"query\": {\"match\": {\"content_type\": \"image/jpeg\"}}, \"filter\": {\"range\" : {\"_timestamp\" : {\"gte\" : "+str(es_ts_start)+", \"lt\": "+str(es_ts_end)+"}}}}}, \"sort\": [ { \"_timestamp\": { \"order\": \"asc\" } } ] }"
print("[get_cdr_ids_infos_rdd] query CDR for one day with: {}".format(query))
else:
query = "{\"fields\": [\""+"\", \"".join(fields_cdr)+"\"], \"query\": {\"filtered\": {\"query\": {\"match\": {\"content_type\": \"image/jpeg\"}}, \"filter\": {\"range\" : {\"_timestamp\" : {\"gte\" : "+str(es_ts_start)+"}}}}}, \"sort\": [ { \"_timestamp\": { \"order\": \"asc\" } } ] }"
print("[get_cdr_ids_infos_rdd] query CDR UP TO NOW with: {}".format(query))
# get incremental update
es_rdd_nopart = es_man.es2rdd(query)
if es_rdd_nopart.isEmpty():
print("[get_cdr_ids_infos_rdd] empty incremental update when querying from timestamp {}".format(es_ts_start))
return None
# es_rdd_nopart is likely to be underpartitioned
es_rdd = es_rdd_nopart.partitionBy(nb_partitions)
# save incremental update infos
incr_update_infos_list = []
es_rdd_count = es_rdd.count()
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "start_time", str(start_time)]))
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "es_rdd_count", str(es_rdd_count)]))
incr_update_infos_rdd = sc.parallelize(incr_update_infos_list)
hbase_man_update_out.rdd2hbase(incr_update_infos_rdd)
# save to hbase
images_ts_cdrid_rdd = es_rdd.flatMap(create_images_tuple)
print("[get_cdr_ids_infos_rdd: info] images_ts_cdrid_rdd first samples look like: {}".format(images_ts_cdrid_rdd.take(5)))
print("[get_cdr_ids_infos_rdd] saving 'images_ts_cdrid_rdd' to HBase.")
hbase_man_ts.rdd2hbase(images_ts_cdrid_rdd)
min_ts_cdrid = images_ts_cdrid_rdd.min()[0].strip()
max_ts_cdrid = images_ts_cdrid_rdd.max()[0].strip()
# save incremental update infos
incr_update_infos_list = []
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "min_ts_cdrid", min_ts_cdrid]))
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "max_ts_cdrid", max_ts_cdrid]))
incr_update_infos_rdd = sc.parallelize(incr_update_infos_list)
print("[get_cdr_ids_infos_rdd] saving incremental update infos: id {}, min_ts_cdrid {}, max_ts_cdrid {}".format(incr_update_id, min_ts_cdrid, max_ts_cdrid))
hbase_man_update_out.rdd2hbase(incr_update_infos_rdd)
cdr_ids_infos_rdd = es_rdd.flatMap(to_cdr_id_dict)
# save rdd
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, cdr_ids_infos_rdd, incr_update_id, hbase_man_update_out)
return cdr_ids_infos_rdd
def get_cdr_ids_infos_rdd_join_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
rdd_name = "cdr_ids_infos_rdd_join_sha1"
# always try to load from disk
cdr_ids_infos_rdd_join_sha1 = load_rdd_json(basepath_save, rdd_name)
if cdr_ids_infos_rdd_join_sha1 is not None:
print("[get_cdr_ids_infos_rdd_join_sha1: info] cdr_ids_infos_rdd_join_sha1 loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return cdr_ids_infos_rdd_join_sha1
# get dependency s3url_infos_rdd_join
s3url_infos_rdd_join = get_s3url_infos_rdd_join(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time)
if s3url_infos_rdd_join is None:
print("[get_cdr_ids_infos_rdd_join_sha1] s3url_infos_rdd_join is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
# invert s3url_infos_rdd_join (s3_url, ([v], sha1)) into cdr_ids_infos_rdd_join_sha1 (k, [v]) adding info:sha1 in each v dict
s3url_infos_rdd_with_sha1 = s3url_infos_rdd_join.filter(get_existing_joined_sha1)
cdr_ids_infos_rdd_join_sha1 = s3url_infos_rdd_with_sha1.flatMap(s3url_dict_list_to_cdr_id_wsha1)
print("[get_cdr_ids_infos_rdd_join_sha1: info] cdr_ids_infos_rdd_join_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_join_sha1.take(5)))
# save infos to hbase update table
cdr_ids_infos_rdd_join_sha1_count = cdr_ids_infos_rdd_join_sha1.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, cdr_ids_infos_rdd_join_sha1_count, rdd_name+"_count")
# save rdd content to hbase
print("[get_cdr_ids_infos_rdd_join_sha1: info] cdr_ids_infos_rdd_join_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_join_sha1.take(5)))
print("[get_cdr_ids_infos_rdd_join_sha1] saving 'cdr_ids_infos_rdd_join_sha1' to HBase.")
hbase_man_cdrinfos_out.rdd2hbase(cdr_ids_infos_rdd_join_sha1.flatMap(expand_info))
# save rdd to disk
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, cdr_ids_infos_rdd_join_sha1, incr_update_id, hbase_man_update_out)
return cdr_ids_infos_rdd_join_sha1
def get_update_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
rdd_name = "update_join_rdd"
# always try to load from disk
update_join_rdd = load_rdd_json(basepath_save, rdd_name)
if update_join_rdd is not None:
print("[get_update_join_rdd: info] update_join_rdd loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return update_join_rdd
# we need cdr_ids_infos_rdd_join_sha1
cdr_ids_infos_rdd_join_sha1 = get_cdr_ids_infos_rdd_join_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if cdr_ids_infos_rdd_join_sha1 is None:
print("[get_update_join_rdd] cdr_ids_infos_rdd_join_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
# transform cdr_id rdd into sha1 rdd
print("[get_update_join_rdd] cdr_ids_infos_rdd_join_sha1 first samples are: {}".format(cdr_ids_infos_rdd_join_sha1.take(5)))
sha1_infos_rdd_from_join = cdr_ids_infos_rdd_join_sha1.flatMap(cdrid_key_to_sha1_key)
update_join_rdd = sha1_infos_rdd_from_join.reduceByKey(reduce_sha1_infos_discarding)
# save rdd infos
update_join_rdd_count = update_join_rdd.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, update_join_rdd_count, rdd_name+"_count")
# save to disk
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, update_join_rdd, incr_update_id, hbase_man_update_out)
return update_join_rdd
def compute_out_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time):
## check if we not already have computed this join step of this update
out_join_rdd_path = basepath_save + "/out_join_rdd"
out_join_rdd_amandeep = None
update_join_rdd_partitioned = None
sha1_infos_rdd_json = None
if c_options.restart:
try:
if hdfs_file_exist(out_join_rdd_path):
out_join_rdd_amandeep = sc.sequenceFile(out_join_rdd_path).mapValues(amandeep_dict_str_to_out)
except Exception as inst:
pass
if out_join_rdd_amandeep is not None:
# consider already processed
print("[compute_out_join_rdd] out_join_rdd already computed for update {}.".format(incr_update_id))
# if we are re-running this, it might mean we did not manage to save to HBase. Retrying
save_out_rdd_to_hbase(out_join_rdd_amandeep, hbase_man_sha1infos_out)
return out_join_rdd_amandeep
## try to reload rdds that could have already been computed, compute chain of dependencies if needed
# propagate down es_ts_end
update_join_rdd = get_update_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if update_join_rdd is None:
print("[compute_out_join_rdd] update_join_rdd is empty.")
else:
## update cdr_ids, and parents cdr_ids for the existing sha1s (if any)
print("[compute_out_join_rdd] Reading from sha1_infos HBase table.")
sha1_infos_rdd = hbase_man_sha1infos_join.read_hbase_table()
if not sha1_infos_rdd.isEmpty():
update_join_rdd_partitioned = update_join_rdd.partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
sha1_infos_rdd_json = sha1_infos_rdd.flatMap(sha1_key_json).partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
update_join_sha1_rdd = update_join_rdd_partitioned.leftOuterJoin(sha1_infos_rdd_json).flatMap(flatten_leftjoin)
out_join_rdd_amandeep = update_join_sha1_rdd
else: # first update
out_join_rdd_amandeep = update_join_rdd
## save rdd
if c_options.save_inter_rdd:
if out_join_rdd_amandeep is None or out_join_rdd_amandeep.isEmpty():
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", "out_join_rdd_path")
else:
try:
if not hdfs_file_exist(out_join_rdd_path):
out_join_rdd_amandeep.filter(filter_out_rdd).map(out_to_amandeep_dict_str).saveAsSequenceFile(out_join_rdd_path)
else:
print("[compute_out_join_rdd] Skipped saving out_join_rdd. File already exists at {}.".format(out_join_rdd_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, out_join_rdd_path, "out_join_rdd_path")
except Exception as inst:
print("[compute_out_join_rdd] could not save rdd at {}, error was {}.".format(out_join_rdd_path, inst))
save_out_rdd_to_hbase(out_join_rdd_amandeep, hbase_man_sha1infos_out)
# if out_join_rdd_amandeep is not None:
# ## save sha1 infos for these joined images in HBase
# out_join_rdd = out_join_rdd_amandeep.flatMap(split_sha1_kv_images_discarded)
# print("[compute_out_join_rdd] saving 'out_join_rdd' to sha1_infos HBase table.")
# hbase_man_sha1infos_out.rdd2hbase(out_join_rdd)
return out_join_rdd_amandeep
def get_cdr_ids_infos_rdd_new_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
## for not matching s3url i.e. missing sha1
rdd_name = "cdr_ids_infos_rdd_new_sha1"
# always try to load from disk
cdr_ids_infos_rdd_new_sha1 = load_rdd_json(basepath_save, rdd_name)
if cdr_ids_infos_rdd_new_sha1 is not None:
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_new_sha1.take(5)))
return cdr_ids_infos_rdd_new_sha1
# get joined (actually all s3 urls of current update if not c_options.join_s3url), subtract, download images
s3url_infos_rdd_join = get_s3url_infos_rdd_join(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time)
if s3url_infos_rdd_join is not None:
s3url_infos_rdd_with_sha1 = s3url_infos_rdd_join.filter(get_existing_joined_sha1)
if not s3url_infos_rdd_with_sha1.isEmpty():
s3url_infos_rdd_no_sha1 = s3url_infos_rdd_join.subtractByKey(s3url_infos_rdd_with_sha1)
else: # when all new s3 urls or not c_options.join_s3url
s3url_infos_rdd_no_sha1 = s3url_infos_rdd_join
s3url_infos_rdd_no_sha1_count = s3url_infos_rdd_no_sha1.count()
print("[get_cdr_ids_infos_rdd_new_sha1: info] starting to download images to get new sha1s for {} URLs.".format(s3url_infos_rdd_no_sha1_count))
s3url_infos_rdd_new_sha1 = s3url_infos_rdd_no_sha1.partitionBy(nb_partitions).flatMap(check_get_sha1_s3url)
cdr_ids_infos_rdd_new_sha1 = s3url_infos_rdd_new_sha1.flatMap(s3url_dict_list_to_cdr_id_wsha1)
else:
cdr_ids_infos_rdd_new_sha1 = None
if cdr_ids_infos_rdd_new_sha1 is None or cdr_ids_infos_rdd_new_sha1.isEmpty():
print("[get_cdr_ids_infos_rdd_new_sha1] cdr_ids_infos_rdd_new_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
else:
# save rdd
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_new_sha1.take(5)))
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, "cdr_ids_infos_rdd_new_sha1", cdr_ids_infos_rdd_new_sha1, incr_update_id, hbase_man_update_out)
# save infos
cdr_ids_infos_rdd_new_sha1_count = cdr_ids_infos_rdd_new_sha1.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, cdr_ids_infos_rdd_new_sha1_count, "cdr_ids_infos_rdd_new_sha1_count")
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_new_sha1.take(5)))
print("[get_cdr_ids_infos_rdd_new_sha1] saving 'cdr_ids_infos_rdd_new_sha1' to HBase.")
hbase_man_cdrinfos_out.rdd2hbase(cdr_ids_infos_rdd_new_sha1.flatMap(expand_info))
return cdr_ids_infos_rdd_new_sha1
def get_update_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
rdd_name = "update_rdd"
# always try to load from disk
update_rdd = load_rdd_json(basepath_save, rdd_name)
if update_rdd is not None:
print("[get_update_rdd: info] update_rdd loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return update_rdd
cdr_ids_infos_rdd_new_sha1 = get_cdr_ids_infos_rdd_new_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if cdr_ids_infos_rdd_new_sha1 is None:
print("[get_update_rdd] cdr_ids_infos_rdd_new_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
# here new sha1s means we did not see the corresponding s3url before, but the sha1 may still be in the sha1_infos table
# so we still need to merge potentially
update_rdd = cdr_ids_infos_rdd_new_sha1.flatMap(cdrid_key_to_sha1_key).reduceByKey(reduce_sha1_infos_discarding)
update_rdd_count = update_rdd.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, update_rdd_count, "update_rdd_count")
# save to disk
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, update_rdd, incr_update_id, hbase_man_update_out)
    # also return update_rdd_count to allow dynamic partitioning?
return update_rdd
def compute_out_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time):
## check if we not already have computed this join step of this update
rdd_name = "out_rdd"
out_rdd_path = basepath_save + "/" + rdd_name
out_rdd_amandeep = None
update_rdd_partitioned = None
sha1_infos_rdd_json = None
if c_options.restart:
print "[compute_out_rdd] Looking for:",out_rdd_path
try:
if hdfs_file_exist(out_rdd_path):
out_rdd_amandeep = sc.sequenceFile(out_rdd_path).mapValues(amandeep_dict_str_to_out)
except Exception as inst:
# would mean file existed but corrupted?
pass
if out_rdd_amandeep is not None:
# consider already processed
print("[compute_out_rdd] out_rdd already computed for update {}.".format(incr_update_id))
# we should try to check if saving to hbase_man_sha1infos_out has completed
save_out_rdd_to_hbase(out_rdd_amandeep, hbase_man_sha1infos_out)
return out_rdd_amandeep
## try to reload rdds that could have already been computed, compute chain of dependencies if needed
# propagate down es_ts_end
update_rdd = get_update_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if update_rdd is None:
print("[compute_out_rdd] update_rdd is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
## update cdr_ids, and parents cdr_ids for these new sha1s
print("[compute_out_rdd] reading from hbase_man_sha1infos_join to get sha1_infos_rdd.")
sha1_infos_rdd = hbase_man_sha1infos_join.read_hbase_table()
# we may need to merge some 'all_cdr_ids' and 'all_parent_ids'
if not sha1_infos_rdd.isEmpty():
print("[compute_out_rdd] partitioning update_rdd.")
update_rdd_partitioned = update_rdd.partitionBy(nb_partitions)
print("[compute_out_rdd] partitioning sha1_infos_rdd.")
sha1_infos_rdd_json = sha1_infos_rdd.flatMap(sha1_key_json).partitionBy(nb_partitions)
print("[compute_out_rdd] joining sha1_infos_rdd and update_rdd.")
join_rdd = update_rdd_partitioned.leftOuterJoin(sha1_infos_rdd_json).flatMap(flatten_leftjoin)
out_rdd_amandeep = join_rdd
else: # first update
out_rdd_amandeep = update_rdd
# save rdd
if c_options.save_inter_rdd:
try:
if not hdfs_file_exist(out_rdd_path):
# we should discard based on c_options.max_images_dig here actually
out_rdd_save = out_rdd_amandeep.filter(filter_out_rdd).map(out_to_amandeep_dict_str)
if not out_rdd_save.isEmpty():
out_rdd_save.saveAsSequenceFile(out_rdd_path)
save_info_incremental_update(hbase_man_update_out, incr_update_id, out_rdd_path, rdd_name+"_path")
else:
print("[compute_out_rdd] 'out_rdd_save' is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
else:
print("[compute_out_rdd] Skipped saving out_rdd. File already exists at {}.".format(out_rdd_path))
#return None
# org.apache.hadoop.mapred.FileAlreadyExistsException
except Exception as inst:
print("[compute_out_rdd] could not save rdd at {}, error was {}.".format(out_rdd_path, inst))
# save to HBase
save_out_rdd_to_hbase(out_rdd_amandeep, hbase_man_sha1infos_out)
# ## write out rdd of new images
# out_rdd = out_rdd_amandeep.flatMap(split_sha1_kv_images_discarded)
# if not out_rdd.isEmpty():
# print("[compute_out_rdd] saving 'out_rdd' to sha1_infos HBase table.")
# hbase_man_sha1infos_out.rdd2hbase(out_rdd)
# # how to be sure this as completed?
# else:
# print("[compute_out_rdd] 'out_rdd' is empty.")
return out_rdd_amandeep
def save_out_rdd_to_hbase(out_rdd_amandeep, hbase_man_sha1infos_out):
if out_rdd_amandeep is not None:
# write out rdd of new images
out_rdd = out_rdd_amandeep.flatMap(split_sha1_kv_images_discarded)
if not out_rdd.isEmpty():
print("[save_out_rdd_to_hbase] saving 'out_rdd' to sha1_infos HBase table.")
hbase_man_sha1infos_out.rdd2hbase(out_rdd)
            # how to be sure this has completed?
else:
print("[save_out_rdd_to_hbase] 'out_rdd' is empty.")
else:
print("[save_out_rdd_to_hbase] 'out_rdd_amandeep' is None.")
##-------------
def incremental_update(es_man, hbase_man_ts, hbase_man_cdrinfos_out, hbase_man_sha1infos_join, hbase_man_sha1infos_out, hbase_man_s3url_sha1_in, hbase_man_s3url_sha1_out, hbase_man_update_out, nb_partitions, c_options):
# We should query to get all data from LAST day
print("Will process full day before {}".format(c_options.day_to_process))
start_date = dateutil.parser.parse(c_options.day_to_process)
# es_ts_end could be set to None if uptonow was set to True
# ES timestamp in milliseconds
es_ts_end = calendar.timegm(start_date.utctimetuple())*1000
es_ts_start = es_ts_end - day_gap
print("Will query CDR from {} to {}".format(es_ts_start, es_ts_end))
# We should propagate down es_ts_start AND es_ts_end
restart = c_options.restart
save_inter_rdd = c_options.save_inter_rdd
identifier = c_options.identifier
day_to_process = c_options.day_to_process
batch_update_size = c_options.batch_update_size
start_time = time.time()
## set incr_update_id
if c_options.restart:
if not c_options.identifier:
raise ValueError('[incremental_update: error] Trying to restart without specifying update identifier.')
incr_update_id = c_options.identifier
else:
if c_options.day_to_process:
incr_update_id = datetime.date.fromtimestamp((es_ts_start)/1000).isoformat()
else:
incr_update_id = 'incremental_update_'+str(max_ts-int(start_time*1000))
#basepath_save = '/user/skaraman/data/images_incremental_update/'+incr_update_id
basepath_save = base_incremental_path+incr_update_id+'/images/info'
if c_options.join_s3url:
## compute update for s3 urls we already now
out_join_rdd = compute_out_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time)
## save potential new images in out_join_rdd by batch of 10000 to be indexed?
# They should have been indexed the first time they have been seen... But download could have failed etc.
    # Might be good to retry image without cu_feat_id here when indexing has caught up
#save_new_images_for_index(out_join_rdd, hbase_man_update_out, incr_update_id, batch_update_size, "new_images_to_index_join")
## compute update for new s3 urls
out_rdd = compute_out_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time)
if out_rdd is not None and not out_rdd.isEmpty():
save_new_images_for_index(basepath_save, out_rdd, hbase_man_update_out, incr_update_id, batch_update_size, c_options, "new_images_to_index")
save_new_s3_url(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
update_elapsed_time = time.time() - start_time
save_info_incremental_update(hbase_man_update_out, incr_update_id, str(update_elapsed_time), "update_elapsed_time")
## MAIN
if __name__ == '__main__':
start_time = time.time()
# Parse options
parser = OptionParser()
parser.add_option("-r", "--restart", dest="restart", default=False, action="store_true")
parser.add_option("-i", "--identifier", dest="identifier") # redudant with day to process now...
parser.add_option("-d", "--day_to_process", dest="day_to_process", default=datetime.date.today().isoformat())
parser.add_option("-s", "--save", dest="save_inter_rdd", default=False, action="store_true")
parser.add_option("-j", "--join_s3url", dest="join_s3url", default=False, action="store_true")
parser.add_option("-u", "--uptonow", dest="uptonow", default=False, action="store_true")
parser.add_option("-b", "--batch_update_size", dest="batch_update_size", default=10000)
# expose max_images_dig so Amandeep can change that on the fly if needed
parser.add_option("-m", "--max_images_dig", dest="max_images_dig", default=50000)
# we could add options for uptonow, auto join based on number of s3_urls to download
(c_options, args) = parser.parse_args()
print "Got options:", c_options
# Read job_conf
job_conf = json.load(open("job_conf_notcommited"+dev_release_suffix+".json","rt"))
print job_conf
sc = SparkContext(appName="images_incremental_update"+dev_release_suffix)
conf = SparkConf()
log4j = sc._jvm.org.apache.log4j
log4j.LogManager.getRootLogger().setLevel(log4j.Level.ERROR)
# Set parameters job_conf
# should this be estimated from RDD counts actually?
nb_partitions = job_conf["nb_partitions"]
# HBase Conf
hbase_host = job_conf["hbase_host"]
tab_ts_name = job_conf["tab_ts_name"]
hbase_man_ts = HbaseManager(sc, conf, hbase_host, tab_ts_name)
tab_cdrid_infos_name = job_conf["tab_cdrid_infos_name"]
tab_sha1_infos_name = job_conf["tab_sha1_infos_name"]
tab_s3url_sha1_name = job_conf["tab_s3url_sha1_name"]
tab_update_name = job_conf["tab_update_name"]
# this is the maximum number of cdr_ids for an image to be saved to HBase
max_images_hbase = job_conf["max_images"]
# this is the maximum number of cdr_ids for an image to be saved to HDFS for dig
max_images_dig = c_options.max_images_dig
max_images_reduce = max(max_images_hbase, max_images_dig)
# Setup HBase managers
join_columns_list = [':'.join(x) for x in fields_list]
hbase_man_sha1infos_join = HbaseManager(sc, conf, hbase_host, tab_sha1_infos_name, columns_list=join_columns_list)
hbase_man_sha1infos_out = HbaseManager(sc, conf, hbase_host, tab_sha1_infos_name)
hbase_man_cdrinfos_out = HbaseManager(sc, conf, hbase_host, tab_cdrid_infos_name)
hbase_man_update_out = HbaseManager(sc, conf, hbase_host, tab_update_name, time_sleep=time_sleep_update_out)
# actually only needed if join_s3url is True
hbase_man_s3url_sha1_in = HbaseManager(sc, conf, hbase_host, tab_s3url_sha1_name)
hbase_man_s3url_sha1_out = HbaseManager(sc, conf, hbase_host, tab_s3url_sha1_name)
# ES conf
es_index = job_conf["es_index"]
es_domain = job_conf["es_domain"]
es_host = job_conf["es_host"]
es_port = job_conf["es_port"]
es_user = job_conf["es_user"]
es_pass = job_conf["es_pass"]
# deprecated
#es_ts_start = job_conf["query_timestamp_start"]
# Setup ES manager
es_man = ES(sc, conf, es_index, es_domain, es_host, es_port, es_user, es_pass)
es_man.set_output_json()
es_man.set_read_metadata()
# Run update
incremental_update(es_man, hbase_man_ts, hbase_man_cdrinfos_out, hbase_man_sha1infos_join, hbase_man_sha1infos_out, hbase_man_s3url_sha1_in, hbase_man_s3url_sha1_out, hbase_man_update_out, nb_partitions, c_options)
print("[DONE] Update for day {} done in {}s.".format(c_options.day_to_process, time.time() - start_time))
| bsd-2-clause | 2,145,436,833,996,540,200 | 48.189799 | 322 | 0.640768 | false |
twitter/pants | src/python/pants/base/worker_pool.py | 1 | 8243 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import multiprocessing
import threading
from builtins import next, object
from multiprocessing.pool import ThreadPool
from future.moves import _thread
from pants.reporting.report import Report
class Work(object):
"""Represents multiple concurrent calls to the same callable."""
def __init__(self, func, args_tuples, workunit_name=None):
# A callable.
self.func = func
# A list of tuples of args. func will be called once per tuple, concurrently.
# The length of this list is the cardinality of the work.
self.args_tuples = args_tuples
# If specified, each invocation will be executed in a workunit of this name.
self.workunit_name = workunit_name
class WorkerPool(object):
"""A pool of workers.
Workers are threads, and so are subject to GIL constraints. Submitting CPU-bound work
may not be effective. Use this class primarily for IO-bound work.
"""
def __init__(self, parent_workunit, run_tracker, num_workers):
self._run_tracker = run_tracker
# All workers accrue work to the same root.
self._pool = ThreadPool(processes=num_workers,
initializer=self._run_tracker.register_thread,
initargs=(parent_workunit, ))
# We mustn't shutdown when there are pending workchains, as they may need to submit work
# in the future, and the pool doesn't know about this yet.
self._pending_workchains = 0
self._pending_workchains_cond = threading.Condition() # Protects self._pending_workchains.
self._shutdown_hooks = []
self.num_workers = num_workers
def add_shutdown_hook(self, hook):
self._shutdown_hooks.append(hook)
def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None):
"""Submit work to be executed in the background.
:param work: The work to execute.
:param workunit_parent: If specified, work is accounted for under this workunit.
:param on_success: If specified, a callable taking a single argument, which will be a list
of return values of each invocation, in order. Called only if all work succeeded.
:param on_failure: If specified, a callable taking a single argument, which is an exception
thrown in the work.
:return: `multiprocessing.pool.MapResult`
Don't do work in on_success: not only will it block the result handling thread, but
that thread is not a worker and doesn't have a logging context etc. Use it just to
submit further work to the pool.
"""
if work is None or len(work.args_tuples) == 0: # map_async hangs on 0-length iterables.
if on_success:
on_success([])
else:
def do_work(*args):
self._do_work(work.func, *args, workunit_name=work.workunit_name,
workunit_parent=workunit_parent, on_failure=on_failure)
return self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success)
def submit_async_work_chain(self, work_chain, workunit_parent, done_hook=None):
"""Submit work to be executed in the background.
- work_chain: An iterable of Work instances. Will be invoked serially. Each instance may
have a different cardinality. There is no output-input chaining: the argument
tuples must already be present in each work instance. If any work throws an
exception no subsequent work in the chain will be attempted.
- workunit_parent: Work is accounted for under this workunit.
- done_hook: If not None, invoked with no args after all work is done, or on error.
"""
def done():
if done_hook:
done_hook()
with self._pending_workchains_cond:
self._pending_workchains -= 1
self._pending_workchains_cond.notify()
def error(e):
done()
self._run_tracker.log(Report.ERROR, '{}'.format(e))
# We filter out Nones defensively. There shouldn't be any, but if a bug causes one,
# Pants might hang indefinitely without this filtering.
work_iter = (_f for _f in work_chain if _f)
def submit_next():
try:
self.submit_async_work(next(work_iter), workunit_parent=workunit_parent,
on_success=lambda x: submit_next(), on_failure=error)
except StopIteration:
done() # The success case.
with self._pending_workchains_cond:
self._pending_workchains += 1
try:
submit_next()
except Exception as e: # Handles errors in the submission code.
done()
self._run_tracker.log(Report.ERROR, '{}'.format(e))
raise
def submit_work_and_wait(self, work, workunit_parent=None):
"""Submit work to be executed on this pool, but wait for it to complete.
- work: The work to execute.
- workunit_parent: If specified, work is accounted for under this workunit.
Returns a list of return values of each invocation, in order. Throws if any invocation does.
"""
if work is None or len(work.args_tuples) == 0: # map hangs on 0-length iterables.
return []
else:
def do_work(*args):
return self._do_work(work.func, *args, workunit_name=work.workunit_name,
workunit_parent=workunit_parent)
# We need to specify a timeout explicitly, because otherwise python ignores SIGINT when waiting
# on a condition variable, so we won't be able to ctrl-c out.
return self._pool.map_async(do_work, work.args_tuples, chunksize=1).get(timeout=1000000000)
def _do_work(self, func, args_tuple, workunit_name, workunit_parent, on_failure=None):
try:
if workunit_name:
with self._run_tracker.new_workunit_under_parent(name=workunit_name, parent=workunit_parent):
return func(*args_tuple)
else:
return func(*args_tuple)
except KeyboardInterrupt:
# If a worker thread intercepts a KeyboardInterrupt, we want to propagate it to the main
# thread.
_thread.interrupt_main()
raise
except Exception as e:
if on_failure:
# Note that here the work's workunit is closed. So, e.g., it's OK to use on_failure()
# to close an ancestor workunit.
on_failure(e)
raise
def shutdown(self):
with self._pending_workchains_cond:
while self._pending_workchains > 0:
self._pending_workchains_cond.wait()
self._pool.close()
self._pool.join()
for hook in self._shutdown_hooks:
hook()
def abort(self):
self._pool.terminate()
class SubprocPool(object):
"""Singleton for managing multiprocessing.Pool instances
Subprocesses (including multiprocessing.Pool workers) can inherit locks in poorly written
libraries (eg zlib) if other threads in the parent process happen to be holding them at the
moment the worker is fork()'ed. Thus it is important to create any subprocesses BEFORE
starting any threads, or they may deadlock mysteriously when sent a particular piece of work.
  This is accomplished in pants by initializing these pools early, when creating the RunTracker.
However, in tests, RunTrackers are created repeatedly, as part of creating Contexts that
are used briefly and discarded. Creating a new subprocess pool every time is expensive, and will
lead to os.fork failing once too many processes are spawned.
To avoid this, the pools themselves are kept in this singleton and new RunTrackers re-use them.
"""
_pool = None
_lock = threading.Lock()
_num_processes = multiprocessing.cpu_count()
@classmethod
def set_num_processes(cls, num_processes):
cls._num_processes = num_processes
@classmethod
def foreground(cls):
with cls._lock:
if cls._pool is None:
cls._pool = ThreadPool(processes=cls._num_processes)
return cls._pool
@classmethod
def shutdown(cls, force):
with cls._lock:
old = cls._pool
cls._pool = None
if old:
if force:
old.terminate()
else:
old.close()
old.join()
| apache-2.0 | -2,454,205,278,918,337,000 | 37.339535 | 101 | 0.678151 | false |
hycis/Pynet | pynet/cost.py | 1 | 3580 | __author__ = "Zhenzhou Wu"
__copyright__ = "Copyright 2012, Zhenzhou Wu"
__credits__ = ["Zhenzhou Wu"]
__license__ = "3-clause BSD"
__email__ = "[email protected]"
__maintainer__ = "Zhenzhou Wu"
import theano.tensor as T
import theano
from pynet.utils.utils import theano_unique
floatX = theano.config.floatX
class Cost(object):
"""
    Collection of cost functions and accuracy measures used for training
    and evaluating models.
"""
def __init__(self, type = 'nll'):
self.type = type
def get_accuracy(self, y, y_pred):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', y_pred.type))
rval = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1)).sum() / y.shape[0]
return rval.astype(floatX)
def positives(self, y, y_pred):
"""
return the number of correctly predicted examples in a batch
"""
rval = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1)).sum()
return rval.astype(floatX)
def get_batch_cost(self, y, y_pred):
return getattr(self, '_batch_cost_' + self.type)(y, y_pred)
def _batch_cost_nll(self, y, y_pred):
"""
return the total cost of all the examples in a batch
"""
rval = T.sum(T.log(y_pred)[T.arange(y.shape[0]), y.argmin(axis=1)])
return rval.astype(floatX)
def confusion_matrix(self, y, y_pred):
#TODO
pass
def get_cost(self, y, y_pred):
return getattr(self, '_cost_' + self.type)(y, y_pred)
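        # Dispatch example (hypothetical tensors y, y_pred): Cost(type='mse')
        # resolves get_cost(y, y_pred) to _cost_mse(y, y_pred), and
        # Cost(type='entropy') resolves it to _cost_entropy(y, y_pred).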
def _cost_mse(self, y, y_pred):
L = T.sum(T.sqr(y - y_pred), axis=1)
rval = T.mean(L)
return rval.astype(floatX)
def _cost_entropy(self, y, y_pred):
L = - T.sum(y * T.log(y_pred) + (1-y) * T.log(1-y_pred), axis=1)
rval = T.mean(L)
return rval.astype(floatX)
def _cost_error(self, y, y_pred):
L = T.neq(y_pred.argmax(axis=1), y.argmax(axis=1))
rval = T.mean(L)
return rval.astype(floatX)
def _cost_f1(self, y, y_pred):
#TODO
pass
def _cost_binary_misprecision(self, y, y_pred):
'''
This cost function is only for binary classifications
'''
# assert(theano_unique(y).size == 2)
y_pred = y_pred.argmax(axis=1)
y = y.argmax(axis=1)
TP = (y_pred and y).astype(floatX)
y0 = T.eq(y, 0)
FP = (y0 and y_pred).astype(floatX)
TP = T.sum(TP)
FP = T.sum(FP)
rval = FP / (TP + FP)
return rval
def _cost_FP_minus_TP(self, y, y_pred):
'''
This cost function is only for binary classifications
'''
# assert(theano_unique(y).size == 2)
y_pred = y_pred.argmax(axis=1)
y = y.argmax(axis=1)
TP = (y_pred and y).astype(floatX)
y0 = T.eq(y, 0)
FP = (y0 and y_pred).astype(floatX)
TP = T.mean(TP)
FP = T.mean(FP)
return FP - TP
def _cost_recall(self, y, y_pred):
#TODO
pass
def _cost_abs(self, y, y_pred):
        L = T.sum(T.abs_(y - y_pred), axis=1)
rval = T.mean(L)
return rval.astype(floatX)
| apache-2.0 | -6,236,967,485,249,839,000 | 26.96875 | 79 | 0.550279 | false |
3liz/QuickOSM | QuickOSM/core/api/connexion_oapi.py | 1 | 6275 | """Manage Overpass API connexion."""
import logging
import os
import re
from typing import List
from qgis.core import QgsFileDownloader
from qgis.PyQt.QtCore import QDir, QEventLoop, QFileInfo, QTemporaryFile, QUrl
from QuickOSM.core.exceptions import (
NetWorkErrorException,
OverpassBadRequestException,
OverpassManyRequestException,
OverpassMemoryException,
OverpassRuntimeError,
OverpassTimeoutException,
)
__copyright__ = 'Copyright 2019, 3Liz'
__license__ = 'GPL version 3'
__email__ = '[email protected]'
LOGGER = logging.getLogger('QuickOSM')
class ConnexionOAPI:
"""
Manage connexion to the overpass API.
"""
def __init__(self, url: str, convert: bool = False):
"""Constructor of query.
:param url:Full URL of OverPass Query with the query encoded in it.
:type url:str
"""
self._url = QUrl(url)
if convert:
temporary = QTemporaryFile(
os.path.join(QDir.tempPath(), 'request-XXXXXX.txt'))
else:
temporary = QTemporaryFile(
os.path.join(QDir.tempPath(), 'request-XXXXXX.osm'))
temporary.open()
self.result_path = temporary.fileName()
temporary.close()
self.errors = []
def error(self, messages):
self.errors = messages
@staticmethod
def canceled():
LOGGER.info('Request canceled')
# TODO, need to handle this to stop the process.
@staticmethod
def completed():
LOGGER.info('Request completed')
def run_convert(self):
loop = QEventLoop()
downloader = QgsFileDownloader(
self._url, self.result_path, delayStart=True)
downloader.downloadExited.connect(loop.quit)
downloader.downloadError.connect(self.error)
downloader.downloadCanceled.connect(self.canceled)
downloader.downloadCompleted.connect(self.completed)
downloader.startDownload()
loop.exec_()
with open(self.result_path, encoding='utf8') as txt_file:
text = txt_file.read()
query = re.findall("<pre>\\n(.*?)</pre>", text)[0]
return query
def run(self):
"""Run the query.
@raise OverpassBadRequestException,NetWorkErrorException,
OverpassTimeoutException
@return: The result of the query.
@rtype: str
"""
loop = QEventLoop()
downloader = QgsFileDownloader(
self._url, self.result_path, delayStart=True)
downloader.downloadExited.connect(loop.quit)
downloader.downloadError.connect(self.error)
downloader.downloadCanceled.connect(self.canceled)
downloader.downloadCompleted.connect(self.completed)
downloader.startDownload()
loop.exec_()
for message in self.errors:
self.is_query_timed_out(message)
self.too_many_request(message)
self.is_bad_request(message)
LOGGER.error(message)
if len(self.errors):
raise NetWorkErrorException('Overpass API', ', '.join(self.errors))
osm_file = QFileInfo(self.result_path)
if not osm_file.exists() and not osm_file.isFile():
# Do not raise a QuickOSM exception here
# It must be a bug from QuickOSM
raise FileNotFoundError
self.check_file(self.result_path)
# Everything went fine
return self.result_path
@staticmethod
def check_file(path: str):
        # The download is done, check for an incomplete OSM file.
        # Overpass might have aborted the request with HTTP 200.
LOGGER.info('Checking OSM file content {}'.format(path))
def last_lines(file_path: str, line_count: int) -> List[str]:
bufsize = 8192
fsize = os.stat(file_path).st_size
iteration = 0
with open(file_path, encoding='utf8') as f:
if bufsize > fsize:
bufsize = fsize - 1
data = []
while True:
iteration += 1
seek_size = fsize - bufsize * iteration
if seek_size < 0:
seek_size = 0
f.seek(seek_size)
data.extend(f.readlines())
if len(data) >= line_count or f.tell() == 0:
line_content = data[-line_count:]
return line_content
else:
return list(f.readlines())
lines = last_lines(path, 10)
# Check if we can use the static method below
timeout = (
'<remark> runtime error: Query timed out in "[a-z]+" at line '
'[\d]+ after ([\d]+) seconds. </remark>')
if re.search(timeout, ''.join(lines)):
raise OverpassTimeoutException
memory = (
'<remark> runtime error: Query ran out of memory in "query" at '
'line [\d]+. It would need at least ([\d]+) (.*) of RAM to '
'continue. </remark>')
search = re.search(memory, ''.join(lines))
if search:
raise OverpassMemoryException(search.group(1), search.group(2))
generic = (
'<remark> runtime error: (.*)</remark>')
search = re.search(generic, ''.join(lines))
if search:
raise OverpassRuntimeError(search.group(1))
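        # Example of a remark this method catches (hypothetical values):
        #   <remark> runtime error: Query timed out in "query" at line 3
        #   after 180 seconds. </remark>
        # would raise OverpassTimeoutException.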
@staticmethod
def is_query_timed_out(string: str):
text = 'Network request (.*) timed out'
search = re.search(text, string)
if search:
raise OverpassTimeoutException
@staticmethod
def too_many_request(string: str):
text = '(.*)server replied: Too Many Requests'
search = re.search(text, string)
if search:
raise OverpassManyRequestException
@staticmethod
def is_bad_request(string: str):
text = '(.*)server replied: Bad Request'
search = re.search(text, string)
if search:
raise OverpassBadRequestException
text = '(.*)server replied: Forbidden'
search = re.search(text, string)
if search:
raise OverpassBadRequestException
| gpl-2.0 | 4,600,529,764,808,338,400 | 31.179487 | 79 | 0.578327 | false |
feist/pcs | pcs/test/tools/command_env/mock_node_communicator.py | 1 | 12818 | import json
from urllib.parse import parse_qs
from pcs import settings
from pcs.common import pcs_pycurl as pycurl
from pcs.common.host import Destination
from pcs.common.node_communicator import(
RequestTarget,
RequestData,
Request,
Response,
)
from pcs.test.tools.custom_mock import MockCurlSimple
# pylint: disable=too-many-arguments, protected-access
CALL_TYPE_HTTP_ADD_REQUESTS = "CALL_TYPE_HTTP_ADD_REQUESTS"
CALL_TYPE_HTTP_START_LOOP = "CALL_TYPE_HTTP_START_LOOP"
def log_request(request):
label_data = [
("action", request.action),
("label", request.target.label),
("data", parse_qs(request.data)),
]
if (
request.target.dest_list
!=
[Destination(request.target.label, settings.pcsd_default_port)]
):
label_data.append(
("dest_list", request.target.dest_list)
)
return " ".join([
"{0}:'{1}'".format(key, value) for key, value in label_data
])
def log_response(response, indent=0):
label_data = [
("action", response.request.action),
("label", response.request.target.label),
]
if (
response.request.target.dest_list
!=
[
Destination(
response.request.target.label, settings.pcsd_default_port
),
]
):
label_data.append((
"dest_list",
response.request.target.dest_list
))
label_data.append(("was_connected", response.was_connected))
if response.was_connected:
label_data.append(("respose_code", response.response_code))
else:
label_data.extend([
("errno", response.errno),
("error_msg", response.error_msg),
])
label_data.append(("data", parse_qs(response.request.data)))
return "{0}{1}".format(
" "*indent,
" ".join([
"{0}:'{1}'".format(key, value) for key, value in label_data
]),
)
def different_request_lists(expected_request_list, request_list):
return AssertionError(
(
"Method add_request of NodeCommunicator expected"
" request_list:\n * {0}\nbut got: \n * {1}"
)
.format(
"\n * ".join(log_request(r) for r in expected_request_list),
"\n * ".join(log_request(r) for r in request_list),
)
)
def bad_request_list_content(errors):
return AssertionError(
"NodeCommunicator.add_request got different requests than expected:{0}"
.format("".join([
"\n call index {call_index}:{call_details}".format(
call_index=call_index,
call_details="".join([
"\n mismatch in {option_name}:"
"\n expected: {expected_value}"
"\n real: {real_value}"
.format(
option_name=option_name,
expected_value=pair[0],
real_value=pair[1]
)
for option_name, pair in value.items()
])
)
for call_index, value in errors.items()
]))
)
def _communication_to_response(
label, dest_list, action, param_list, response_code, output, debug_output,
was_connected, errno, error_msg
):
return Response(
MockCurlSimple(
info={pycurl.RESPONSE_CODE: response_code},
output=output,
debug_output=debug_output,
request=Request(
# We do not need to check if token is the right one in tests:
# 1) Library commands tests do not care about tokens. That
# should be covered once in a specialized test, not in every
# single library command test.
                # 2) If we need to test the case when a token is not accepted
# by pcsd, we will do so by setting an appropriate response.
# The actual token value doesn't matter.
RequestTarget(label, dest_list=dest_list, token=None),
RequestData(action, param_list),
)
),
was_connected=was_connected,
errno=errno,
error_msg=error_msg,
)
def create_communication(
communication_list, action="", param_list=None, response_code=200,
output="", debug_output="", was_connected=True, errno=0,
error_msg=None
):
"""
list of dict communication_list -- each dict describes one request-response
it accepts keys:
string label -- required, the label of a node to talk with
dest_list -- list of pcs.common.host.Destination where to send
a request, defaults to [(label, default_pcsd_port)]
string action -- pcsd url, see RequestData
list of pairs param_list -- see RequestData
int response_code -- http response code
string output -- http response output
string debug_output -- pycurl debug output
bool was_connected -- see Response
int errno -- see Response
string error_msg -- see Response
        if some key is not present, it is filled in from the common values -
            the rest of the args of this function (except name,
            communication_list, error_msg_template)
string action -- pcsd url, see RequestData
list of pairs (tuple) param_list -- see RequestData
        int response_code -- http response code
string output -- http response output
string debug_output -- pycurl debug output
bool was_connected -- see Response
int errno -- see Response
string error_msg -- see Response
"""
# We don't care about tokens, see _communication_to_response.
common = dict(
action=action,
param_list=param_list if param_list else (),
response_code=response_code,
output=output,
debug_output=debug_output,
was_connected=was_connected,
errno=errno,
error_msg=error_msg,
)
response_list = []
for communication in communication_list:
if "dest_list" not in communication:
communication["dest_list"] = [
Destination(communication["label"], settings.pcsd_default_port)
]
full = common.copy()
full.update(communication)
response_list.append(_communication_to_response(**full))
request_list = [response.request for response in response_list]
return request_list, response_list
def place_multinode_call(
calls, name, node_labels=None, communication_list=None, before=None,
**kwargs
):
"""
Shortcut for adding a call sending the same request to one or more nodes
CallListBuilder calls -- list of expected calls
string name -- the key of this call
list node_labels -- create success responses from these nodes
list communication_list -- use these custom responses
**kwargs -- see __module__.create_communication
"""
if (
(node_labels is None and communication_list is None)
or
(node_labels and communication_list)
):
raise AssertionError(
"Exactly one of 'node_labels', 'communication_list' "
"must be specified"
)
communication_list = (
communication_list if communication_list is not None
else
[{"label": label} for label in node_labels]
)
place_communication(
calls, name, communication_list, before=before, **kwargs
)
def place_requests(calls, name, request_list, before=None):
calls.place(name, AddRequestCall(request_list), before=before)
def place_responses(calls, name, response_list, before=None):
calls.place(name, StartLoopCall(response_list), before=before)
def place_communication(calls, name, communication_list, before=None, **kwargs):
if not communication_list:
# If code runs a communication command with no targets specified, the
# whole communicator and CURL machinery gets started. It doesn't
# actually send any HTTP requests but it adds an empty list of requests
# to CURL and starts the CURL loop. And the mock must do the same.
place_requests(calls, f"{name}_requests", [], before=before)
place_responses(calls, f"{name}_responses", [], before=before)
return
if isinstance(communication_list[0], dict):
communication_list = [communication_list]
request_list = []
response_list = []
for com_list in communication_list:
req_list, res_list = create_communication(com_list, **kwargs)
request_list.append(req_list)
response_list.extend(res_list)
place_requests(calls, f"{name}_requests", request_list[0], before=before)
place_responses(calls, f"{name}_responses", response_list, before=before)
for i, req_list in enumerate(request_list[1:], start=1):
place_requests(calls, f"{name}_requests_{i}", req_list, before=before)
class AddRequestCall:
type = CALL_TYPE_HTTP_ADD_REQUESTS
def __init__(self, request_list):
self.request_list = request_list
def format(self):
return "Requests:\n * {0}".format(
"\n * ".join([
log_request(request) for request in self.request_list
])
)
def __repr__(self):
return str("<HttpAddRequest '{0}'>").format(self.request_list)
class StartLoopCall:
type = CALL_TYPE_HTTP_START_LOOP
def format(self):
return "Responses:\n * {0}".format(
"\n * ".join([
log_response(response) for response in self.response_list
])
)
def __init__(self, response_list):
self.response_list = response_list
def __repr__(self):
return str("<HttpStartLoop '{0}'>").format(self.response_list)
def _compare_request_data(expected, real):
if expected == real:
return True
    # If the data is in json format it is not possible to compare it as a
    # string, because the key order in the json.dumps output of a dict is not
    # guaranteed. So if the response is built by json.dumps(some_dict) the
    # resulting string can vary.
# Let's try known use: [('data_json', 'some_json_here')]
# It means only one pair "data_json" + json string: everything else is False
if len(expected) != 1:
return False
if len(real) != 1:
return False
if expected[0][0] != real[0][0] or expected[0][0] != "data_json":
return False
try:
expected_data = json.loads(expected[0][1])
real_data = json.loads(real[0][1])
return expected_data == real_data
except ValueError:
return False
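# Illustration (made-up payloads): the two serializations below differ as
# strings but parse to the same JSON document, so _compare_request_data
# treats them as equal; any key other than "data_json" falls through to False.
#
#   expected = [("data_json", '{"a": 1, "b": 2}')]
#   real = [("data_json", '{"b": 2, "a": 1}')]
#   _compare_request_data(expected, real)  # True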
class NodeCommunicator:
def __init__(self, call_queue=None):
self.__call_queue = call_queue
def add_requests(self, request_list):
_, add_request_call = self.__call_queue.take(
CALL_TYPE_HTTP_ADD_REQUESTS,
request_list,
)
expected_request_list = add_request_call.request_list
if len(expected_request_list) != len(request_list):
raise different_request_lists(expected_request_list, request_list)
errors = {}
for i, real_request in enumerate(request_list):
# We don't care about tokens, see _communication_to_response.
expected_request = add_request_call.request_list[i]
diff = {}
if expected_request.action != real_request.action:
diff["action"] = (expected_request.action, real_request.action)
if expected_request.target.label != real_request.target.label:
diff["target.label"] = (
expected_request.target.label,
real_request.target.label
)
if (
expected_request.target.dest_list
!=
real_request.target.dest_list
):
diff["target.dest_list"] = (
expected_request.target.dest_list,
real_request.target.dest_list
)
if not _compare_request_data(
expected_request._data.structured_data,
real_request._data.structured_data
):
diff["data"] = (
expected_request._data.structured_data,
real_request._data.structured_data,
)
if diff:
errors[i] = diff
if errors:
raise self.__call_queue.error_with_context(
bad_request_list_content(errors)
)
def start_loop(self):
_, call = self.__call_queue.take(CALL_TYPE_HTTP_START_LOOP)
return call.response_list
| gpl-2.0 | 4,100,124,142,848,178,000 | 32.643045 | 80 | 0.586051 | false |
SpaceGroupUCL/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/algorithms/shortest_paths/tests/test_generic.py | 1 | 15438 | import pytest
import networkx as nx
from networkx.testing import almost_equal
def validate_grid_path(r, c, s, t, p):
assert isinstance(p, list)
assert p[0] == s
assert p[-1] == t
s = ((s - 1) // c, (s - 1) % c)
t = ((t - 1) // c, (t - 1) % c)
assert len(p) == abs(t[0] - s[0]) + abs(t[1] - s[1]) + 1
p = [((u - 1) // c, (u - 1) % c) for u in p]
for u in p:
assert 0 <= u[0] < r
assert 0 <= u[1] < c
for u, v in zip(p[:-1], p[1:]):
assert (abs(v[0] - u[0]), abs(v[1] - u[1])) in [(0, 1), (1, 0)]
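# Worked example (4x4 grid labelled 1..16 row by row, matching the graph built
# in setup_class below): node 1 maps to cell (0, 0) and node 12 to cell (2, 3),
# so a valid shortest path between them contains |2-0| + |3-0| + 1 = 6 nodes
# and moves exactly one grid step at a time.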
class TestGenericPath:
@classmethod
def setup_class(cls):
from networkx import convert_node_labels_to_integers as cnlti
cls.grid = cnlti(nx.grid_2d_graph(4, 4), first_label=1, ordering="sorted")
cls.cycle = nx.cycle_graph(7)
cls.directed_cycle = nx.cycle_graph(7, create_using=nx.DiGraph())
cls.neg_weights = nx.DiGraph()
cls.neg_weights.add_edge(0, 1, weight=1)
cls.neg_weights.add_edge(0, 2, weight=3)
cls.neg_weights.add_edge(1, 3, weight=1)
cls.neg_weights.add_edge(2, 3, weight=-2)
def test_shortest_path(self):
assert nx.shortest_path(self.cycle, 0, 3) == [0, 1, 2, 3]
assert nx.shortest_path(self.cycle, 0, 4) == [0, 6, 5, 4]
validate_grid_path(4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12))
assert nx.shortest_path(self.directed_cycle, 0, 3) == [0, 1, 2, 3]
# now with weights
assert nx.shortest_path(self.cycle, 0, 3, weight="weight") == [0, 1, 2, 3]
assert nx.shortest_path(self.cycle, 0, 4, weight="weight") == [0, 6, 5, 4]
validate_grid_path(
4, 4, 1, 12, nx.shortest_path(self.grid, 1, 12, weight="weight")
)
assert nx.shortest_path(self.directed_cycle, 0, 3, weight="weight") == [
0,
1,
2,
3,
]
# weights and method specified
assert nx.shortest_path(
self.directed_cycle, 0, 3, weight="weight", method="dijkstra"
) == [0, 1, 2, 3]
assert nx.shortest_path(
self.directed_cycle, 0, 3, weight="weight", method="bellman-ford"
) == [0, 1, 2, 3]
        # bellman-ford is required on this negative-weight graph; Dijkstra's
        # would probably (depending on the precise implementation) incorrectly
        # return [0, 1, 3] instead
assert nx.shortest_path(
self.neg_weights, 0, 3, weight="weight", method="bellman-ford"
) == [0, 2, 3]
# confirm bad method rejection
pytest.raises(ValueError, nx.shortest_path, self.cycle, method="SPAM")
# confirm absent source rejection
pytest.raises(nx.NodeNotFound, nx.shortest_path, self.cycle, 8)
def test_shortest_path_target(self):
answer = {0: [0, 1], 1: [1], 2: [2, 1]}
sp = nx.shortest_path(nx.path_graph(3), target=1)
assert sp == answer
# with weights
sp = nx.shortest_path(nx.path_graph(3), target=1, weight="weight")
assert sp == answer
# weights and method specified
sp = nx.shortest_path(
nx.path_graph(3), target=1, weight="weight", method="dijkstra"
)
assert sp == answer
sp = nx.shortest_path(
nx.path_graph(3), target=1, weight="weight", method="bellman-ford"
)
assert sp == answer
def test_shortest_path_length(self):
assert nx.shortest_path_length(self.cycle, 0, 3) == 3
assert nx.shortest_path_length(self.grid, 1, 12) == 5
assert nx.shortest_path_length(self.directed_cycle, 0, 4) == 4
# now with weights
assert nx.shortest_path_length(self.cycle, 0, 3, weight="weight") == 3
assert nx.shortest_path_length(self.grid, 1, 12, weight="weight") == 5
assert nx.shortest_path_length(self.directed_cycle, 0, 4, weight="weight") == 4
# weights and method specified
assert (
nx.shortest_path_length(
self.cycle, 0, 3, weight="weight", method="dijkstra"
)
== 3
)
assert (
nx.shortest_path_length(
self.cycle, 0, 3, weight="weight", method="bellman-ford"
)
== 3
)
# confirm bad method rejection
pytest.raises(ValueError, nx.shortest_path_length, self.cycle, method="SPAM")
# confirm absent source rejection
pytest.raises(nx.NodeNotFound, nx.shortest_path_length, self.cycle, 8)
def test_shortest_path_length_target(self):
answer = {0: 1, 1: 0, 2: 1}
sp = dict(nx.shortest_path_length(nx.path_graph(3), target=1))
assert sp == answer
# with weights
sp = nx.shortest_path_length(nx.path_graph(3), target=1, weight="weight")
assert sp == answer
# weights and method specified
sp = nx.shortest_path_length(
nx.path_graph(3), target=1, weight="weight", method="dijkstra"
)
assert sp == answer
sp = nx.shortest_path_length(
nx.path_graph(3), target=1, weight="weight", method="bellman-ford"
)
assert sp == answer
def test_single_source_shortest_path(self):
p = nx.shortest_path(self.cycle, 0)
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
p = nx.shortest_path(self.grid, 1)
validate_grid_path(4, 4, 1, 12, p[12])
# now with weights
p = nx.shortest_path(self.cycle, 0, weight="weight")
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_dijkstra_path(self.cycle, 0)
p = nx.shortest_path(self.grid, 1, weight="weight")
validate_grid_path(4, 4, 1, 12, p[12])
# weights and method specified
p = nx.shortest_path(self.cycle, 0, method="dijkstra", weight="weight")
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
p = nx.shortest_path(self.cycle, 0, method="bellman-ford", weight="weight")
assert p[3] == [0, 1, 2, 3]
assert p == nx.single_source_shortest_path(self.cycle, 0)
def test_single_source_shortest_path_length(self):
ans = dict(nx.shortest_path_length(self.cycle, 0))
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.single_source_shortest_path_length(self.cycle, 0))
ans = dict(nx.shortest_path_length(self.grid, 1))
assert ans[16] == 6
# now with weights
ans = dict(nx.shortest_path_length(self.cycle, 0, weight="weight"))
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0))
ans = dict(nx.shortest_path_length(self.grid, 1, weight="weight"))
assert ans[16] == 6
# weights and method specified
ans = dict(
nx.shortest_path_length(self.cycle, 0, weight="weight", method="dijkstra")
)
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.single_source_dijkstra_path_length(self.cycle, 0))
ans = dict(
nx.shortest_path_length(
self.cycle, 0, weight="weight", method="bellman-ford"
)
)
assert ans == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.single_source_bellman_ford_path_length(self.cycle, 0))
def test_all_pairs_shortest_path(self):
p = nx.shortest_path(self.cycle)
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_shortest_path(self.cycle))
p = nx.shortest_path(self.grid)
validate_grid_path(4, 4, 1, 12, p[1][12])
# now with weights
p = nx.shortest_path(self.cycle, weight="weight")
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
p = nx.shortest_path(self.grid, weight="weight")
validate_grid_path(4, 4, 1, 12, p[1][12])
# weights and method specified
p = nx.shortest_path(self.cycle, weight="weight", method="dijkstra")
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_dijkstra_path(self.cycle))
p = nx.shortest_path(self.cycle, weight="weight", method="bellman-ford")
assert p[0][3] == [0, 1, 2, 3]
assert p == dict(nx.all_pairs_bellman_ford_path(self.cycle))
def test_all_pairs_shortest_path_length(self):
ans = dict(nx.shortest_path_length(self.cycle))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_shortest_path_length(self.cycle))
ans = dict(nx.shortest_path_length(self.grid))
assert ans[1][16] == 6
# now with weights
ans = dict(nx.shortest_path_length(self.cycle, weight="weight"))
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
ans = dict(nx.shortest_path_length(self.grid, weight="weight"))
assert ans[1][16] == 6
# weights and method specified
ans = dict(
nx.shortest_path_length(self.cycle, weight="weight", method="dijkstra")
)
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_dijkstra_path_length(self.cycle))
ans = dict(
nx.shortest_path_length(self.cycle, weight="weight", method="bellman-ford")
)
assert ans[0] == {0: 0, 1: 1, 2: 2, 3: 3, 4: 3, 5: 2, 6: 1}
assert ans == dict(nx.all_pairs_bellman_ford_path_length(self.cycle))
def test_has_path(self):
G = nx.Graph()
nx.add_path(G, range(3))
nx.add_path(G, range(3, 5))
assert nx.has_path(G, 0, 2)
assert not nx.has_path(G, 0, 4)
def test_all_shortest_paths(self):
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(nx.all_shortest_paths(G, 0, 3))
# with weights
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
nx.all_shortest_paths(G, 0, 3, weight="weight")
)
# weights and method specified
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
nx.all_shortest_paths(G, 0, 3, weight="weight", method="dijkstra")
)
G = nx.Graph()
nx.add_path(G, [0, 1, 2, 3])
nx.add_path(G, [0, 10, 20, 3])
assert [[0, 1, 2, 3], [0, 10, 20, 3]] == sorted(
nx.all_shortest_paths(G, 0, 3, weight="weight", method="bellman-ford")
)
def test_all_shortest_paths_raise(self):
with pytest.raises(nx.NetworkXNoPath):
G = nx.path_graph(4)
G.add_node(4)
list(nx.all_shortest_paths(G, 0, 4))
def test_bad_method(self):
with pytest.raises(ValueError):
G = nx.path_graph(2)
list(nx.all_shortest_paths(G, 0, 1, weight="weight", method="SPAM"))
def test_all_shortest_paths_zero_weight_edge(self):
g = nx.Graph()
nx.add_path(g, [0, 1, 3])
nx.add_path(g, [0, 1, 2, 3])
g.edges[1, 2]["weight"] = 0
paths30d = list(
nx.all_shortest_paths(g, 3, 0, weight="weight", method="dijkstra")
)
paths03d = list(
nx.all_shortest_paths(g, 0, 3, weight="weight", method="dijkstra")
)
paths30b = list(
nx.all_shortest_paths(g, 3, 0, weight="weight", method="bellman-ford")
)
paths03b = list(
nx.all_shortest_paths(g, 0, 3, weight="weight", method="bellman-ford")
)
assert sorted(paths03d) == sorted(p[::-1] for p in paths30d)
assert sorted(paths03d) == sorted(p[::-1] for p in paths30b)
assert sorted(paths03b) == sorted(p[::-1] for p in paths30b)
class TestAverageShortestPathLength:
def test_cycle_graph(self):
ans = nx.average_shortest_path_length(nx.cycle_graph(7))
assert almost_equal(ans, 2)
def test_path_graph(self):
ans = nx.average_shortest_path_length(nx.path_graph(5))
assert almost_equal(ans, 2)
def test_weighted(self):
G = nx.Graph()
nx.add_cycle(G, range(7), weight=2)
ans = nx.average_shortest_path_length(G, weight="weight")
assert almost_equal(ans, 4)
G = nx.Graph()
nx.add_path(G, range(5), weight=2)
ans = nx.average_shortest_path_length(G, weight="weight")
assert almost_equal(ans, 4)
def test_specified_methods(self):
G = nx.Graph()
nx.add_cycle(G, range(7), weight=2)
ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra")
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford")
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(
G, weight="weight", method="floyd-warshall"
)
assert almost_equal(ans, 4)
G = nx.Graph()
nx.add_path(G, range(5), weight=2)
ans = nx.average_shortest_path_length(G, weight="weight", method="dijkstra")
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(G, weight="weight", method="bellman-ford")
assert almost_equal(ans, 4)
ans = nx.average_shortest_path_length(
G, weight="weight", method="floyd-warshall"
)
assert almost_equal(ans, 4)
def test_disconnected(self):
g = nx.Graph()
g.add_nodes_from(range(3))
g.add_edge(0, 1)
pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g)
g = g.to_directed()
pytest.raises(nx.NetworkXError, nx.average_shortest_path_length, g)
def test_trivial_graph(self):
"""Tests that the trivial graph has average path length zero,
since there is exactly one path of length zero in the trivial
graph.
For more information, see issue #1960.
"""
G = nx.trivial_graph()
assert nx.average_shortest_path_length(G) == 0
def test_null_graph(self):
with pytest.raises(nx.NetworkXPointlessConcept):
nx.average_shortest_path_length(nx.null_graph())
def test_bad_method(self):
with pytest.raises(ValueError):
G = nx.path_graph(2)
nx.average_shortest_path_length(G, weight="weight", method="SPAM")
class TestAverageShortestPathLengthNumpy:
@classmethod
def setup_class(cls):
global numpy
global npt
import pytest
numpy = pytest.importorskip("numpy")
npt = pytest.importorskip("numpy.testing")
def test_specified_methods_numpy(self):
G = nx.Graph()
nx.add_cycle(G, range(7), weight=2)
ans = nx.average_shortest_path_length(
G, weight="weight", method="floyd-warshall-numpy"
)
npt.assert_almost_equal(ans, 4)
G = nx.Graph()
nx.add_path(G, range(5), weight=2)
ans = nx.average_shortest_path_length(
G, weight="weight", method="floyd-warshall-numpy"
)
npt.assert_almost_equal(ans, 4)
| gpl-3.0 | 4,471,098,378,276,850,000 | 39.519685 | 88 | 0.559917 | false |
OpenMined/PySyft | packages/syft/src/syft/proto/core/node/common/action/exception_action_pb2.py | 1 | 6244 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/node/common/action/exception_action.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
from syft.proto.core.io import address_pb2 as proto_dot_core_dot_io_dot_address__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="proto/core/node/common/action/exception_action.proto",
package="syft.core.node.common.service",
syntax="proto3",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n4proto/core/node/common/action/exception_action.proto\x12\x1dsyft.core.node.common.service\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"\xc9\x01\n\x10\x45xceptionMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x37\n\x18msg_id_causing_exception\x18\x03 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x16\n\x0e\x65xception_type\x18\x04 \x01(\t\x12\x15\n\rexception_msg\x18\x05 \x01(\tb\x06proto3',
dependencies=[
proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR,
proto_dot_core_dot_io_dot_address__pb2.DESCRIPTOR,
],
)
_EXCEPTIONMESSAGE = _descriptor.Descriptor(
name="ExceptionMessage",
full_name="syft.core.node.common.service.ExceptionMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.core.node.common.service.ExceptionMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.core.node.common.service.ExceptionMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="msg_id_causing_exception",
full_name="syft.core.node.common.service.ExceptionMessage.msg_id_causing_exception",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="exception_type",
full_name="syft.core.node.common.service.ExceptionMessage.exception_type",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="exception_msg",
full_name="syft.core.node.common.service.ExceptionMessage.exception_msg",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=156,
serialized_end=357,
)
_EXCEPTIONMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_EXCEPTIONMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_EXCEPTIONMESSAGE.fields_by_name[
"msg_id_causing_exception"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
DESCRIPTOR.message_types_by_name["ExceptionMessage"] = _EXCEPTIONMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ExceptionMessage = _reflection.GeneratedProtocolMessageType(
"ExceptionMessage",
(_message.Message,),
{
"DESCRIPTOR": _EXCEPTIONMESSAGE,
"__module__": "proto.core.node.common.action.exception_action_pb2"
# @@protoc_insertion_point(class_scope:syft.core.node.common.service.ExceptionMessage)
},
)
_sym_db.RegisterMessage(ExceptionMessage)
# @@protoc_insertion_point(module_scope)
| apache-2.0 | 2,134,787,039,885,428,000 | 34.477273 | 547 | 0.618834 | false |
MG-RAST/Shock | shock-server/plug-ins/boto-s3-download.py | 1 | 2570 | #!/usr/bin/python
# boto3 python client to download files from S3 and check md5
# AWS_ACCESS_KEY_ID .. The access key for your AWS account.
# AWS_SECRET_ACCESS_KEY .. The secret key for your AWS account.
# [email protected]
import sys, getopt, boto3, hashlib, io
import argparse
def md5sum(src, length=io.DEFAULT_BUFFER_SIZE):
md5 = hashlib.md5()
with io.open(src, mode="rb") as fd:
for chunk in iter(lambda: fd.read(length), b''):
md5.update(chunk)
return md5.hexdigest()
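# Example (hypothetical path): md5sum("/tmp/object.bin") returns the hex digest
# as a string, reading the file in io.DEFAULT_BUFFER_SIZE chunks so that large
# downloads never need to fit in memory at once.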
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a","--keyid", default=None, help=" aws_access_key_id")
parser.add_argument("-b","--bucket", default=None, help="AWS bucket")
parser.add_argument("-t","--tmpfile", default=None,help="filename to create")
parser.add_argument("-o","--objectname", default=None,help="object to download")
parser.add_argument("-k","--accesskey", default=None, help="aws_secret_access_key")
parser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity")
parser.add_argument("-r","--region", default=None, help="AWS region")
parser.add_argument("-s","--s3endpoint", default="https://s3.it.anl.gov:18082")
args = parser.parse_args()
# if args.region is '':
# args.region=' '
if args.verbose:
print ('keyId is =', args.keyid)
print ('accessKey is =', args.accesskey)
print ('bucket is =', args.bucket)
print ('tmpfile is =', args.tmpfile)
print ('region is=', args.region)
print ('object is =', args.objectname)
if args.tmpfile is None:
print ('we need a filename')
sys.exit(2)
# if passed use credentials to establish connection
if args.accesskey is None:
if args.verbose:
print ('using existing credentials from ENV vars or files')
s3 = boto3.client('s3',
endpoint_url=args.s3endpoint,
region_name=args.region
)
else:
        # credentials were passed on the command line; other connection
        # details still follow boto3's configuration chain --> see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
if args.verbose:
print ('using credentials from cmd-line')
s3 = boto3.client('s3',
endpoint_url=args.s3endpoint,
region_name=args.region,
aws_access_key_id=args.keyid,
aws_secret_access_key=args.accesskey
)
with open(args.tmpfile, 'wb') as f:
s3.download_fileobj(args.bucket, args.objectname, f)
md5_new = md5sum(args.tmpfile)
print(md5_new)
sys.exit(0)
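# Typical invocation (bucket, object and endpoint names are placeholders):
#   python boto-s3-download.py -b my-bucket -o path/to/object.bin \
#       -t /tmp/object.bin -s https://s3.example.org:18082 -v
# The script prints the MD5 of the downloaded file so it can be checked
# against an expected checksum.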
main() | bsd-2-clause | 3,889,921,259,526,281,700 | 32.38961 | 134 | 0.638911 | false |
laughingman7743/PyAthena | tests/test_sqlalchemy_athena.py | 1 | 20111 | # -*- coding: utf-8 -*-
import re
import unittest
import uuid
from datetime import date, datetime
from decimal import Decimal
from urllib.parse import quote_plus
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import String
from sqlalchemy.engine import create_engine
from sqlalchemy.exc import NoSuchTableError, OperationalError, ProgrammingError
from sqlalchemy.sql import expression
from sqlalchemy.sql.schema import Column, MetaData, Table
from sqlalchemy.sql.sqltypes import (
BIGINT,
BINARY,
BOOLEAN,
DATE,
DECIMAL,
FLOAT,
INTEGER,
STRINGTYPE,
TIMESTAMP,
)
from tests.conftest import ENV, SCHEMA
from tests.util import with_engine
class TestSQLAlchemyAthena(unittest.TestCase):
"""Reference test case is following:
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/sqlalchemy_test_case.py
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_hive.py
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_presto.py
"""
def create_engine(self, **kwargs):
conn_str = (
"awsathena+rest://athena.{region_name}.amazonaws.com:443/"
+ "{schema_name}?s3_staging_dir={s3_staging_dir}&s3_dir={s3_dir}"
+ "&compression=snappy"
)
if "verify" in kwargs:
conn_str += "&verify={verify}"
if "duration_seconds" in kwargs:
conn_str += "&duration_seconds={duration_seconds}"
if "poll_interval" in kwargs:
conn_str += "&poll_interval={poll_interval}"
if "kill_on_interrupt" in kwargs:
conn_str += "&kill_on_interrupt={kill_on_interrupt}"
return create_engine(
conn_str.format(
region_name=ENV.region_name,
schema_name=SCHEMA,
s3_staging_dir=quote_plus(ENV.s3_staging_dir),
s3_dir=quote_plus(ENV.s3_staging_dir),
**kwargs
)
)
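    # For reference, an expanded connection string looks roughly like this
    # (region, schema and bucket are placeholders):
    #   awsathena+rest://athena.us-east-1.amazonaws.com:443/my_schema
    #       ?s3_staging_dir=s3%3A%2F%2Fmy-bucket%2Fstaging%2F&s3_dir=...&compression=snappy
    # No credentials are embedded here; they are assumed to come from the
    # usual AWS credential chain of the environment running the tests.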
@with_engine()
def test_basic_query(self, engine, conn):
rows = conn.execute("SELECT * FROM one_row").fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0].number_of_rows, 1)
self.assertEqual(len(rows[0]), 1)
@with_engine()
def test_reflect_no_such_table(self, engine, conn):
self.assertRaises(
NoSuchTableError,
lambda: Table("this_does_not_exist", MetaData(bind=engine), autoload=True),
)
self.assertRaises(
NoSuchTableError,
lambda: Table(
"this_does_not_exist",
MetaData(bind=engine),
schema="also_does_not_exist",
autoload=True,
),
)
@with_engine()
def test_reflect_table(self, engine, conn):
one_row = Table("one_row", MetaData(bind=engine), autoload=True)
self.assertEqual(len(one_row.c), 1)
self.assertIsNotNone(one_row.c.number_of_rows)
@with_engine()
def test_reflect_table_with_schema(self, engine, conn):
one_row = Table("one_row", MetaData(bind=engine), schema=SCHEMA, autoload=True)
self.assertEqual(len(one_row.c), 1)
self.assertIsNotNone(one_row.c.number_of_rows)
@with_engine()
def test_reflect_table_include_columns(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine))
version = float(
re.search(r"^([\d]+\.[\d]+)\..+", sqlalchemy.__version__).group(1)
)
if version <= 1.2:
engine.dialect.reflecttable(
conn, one_row_complex, include_columns=["col_int"], exclude_columns=[]
)
elif version == 1.3:
# https://docs.sqlalchemy.org/en/13/changelog/changelog_13.html
# #change-64ac776996da1a5c3e3460b4c0f0b257
engine.dialect.reflecttable(
conn,
one_row_complex,
include_columns=["col_int"],
exclude_columns=[],
resolve_fks=True,
)
else: # version >= 1.4
# https://docs.sqlalchemy.org/en/14/changelog/changelog_14.html
# #change-0215fae622c01f9409eb1ba2754f4792
# https://docs.sqlalchemy.org/en/14/core/reflection.html
# #sqlalchemy.engine.reflection.Inspector.reflect_table
insp = sqlalchemy.inspect(engine)
insp.reflect_table(
one_row_complex,
include_columns=["col_int"],
exclude_columns=[],
resolve_fks=True,
)
self.assertEqual(len(one_row_complex.c), 1)
self.assertIsNotNone(one_row_complex.c.col_int)
self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)
@with_engine()
def test_unicode(self, engine, conn):
unicode_str = "密林"
one_row = Table("one_row", MetaData(bind=engine))
returned_str = sqlalchemy.select(
[expression.bindparam("あまぞん", unicode_str, type_=String())],
from_obj=one_row,
).scalar()
self.assertEqual(returned_str, unicode_str)
@with_engine()
def test_reflect_schemas(self, engine, conn):
insp = sqlalchemy.inspect(engine)
schemas = insp.get_schema_names()
self.assertIn(SCHEMA, schemas)
self.assertIn("default", schemas)
@with_engine()
def test_get_table_names(self, engine, conn):
meta = MetaData()
meta.reflect(bind=engine)
print(meta.tables)
self.assertIn("one_row", meta.tables)
self.assertIn("one_row_complex", meta.tables)
insp = sqlalchemy.inspect(engine)
self.assertIn(
"many_rows",
insp.get_table_names(schema=SCHEMA),
)
@with_engine()
def test_has_table(self, engine, conn):
insp = sqlalchemy.inspect(engine)
self.assertTrue(insp.has_table("one_row", schema=SCHEMA))
self.assertFalse(insp.has_table("this_table_does_not_exist", schema=SCHEMA))
@with_engine()
def test_get_columns(self, engine, conn):
insp = sqlalchemy.inspect(engine)
actual = insp.get_columns(table_name="one_row", schema=SCHEMA)[0]
self.assertEqual(actual["name"], "number_of_rows")
self.assertTrue(isinstance(actual["type"], INTEGER))
self.assertTrue(actual["nullable"])
self.assertIsNone(actual["default"])
self.assertEqual(actual["ordinal_position"], 1)
self.assertIsNone(actual["comment"])
@with_engine()
def test_char_length(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine), autoload=True)
result = (
sqlalchemy.select(
[sqlalchemy.func.char_length(one_row_complex.c.col_string)]
)
.execute()
.scalar()
)
self.assertEqual(result, len("a string"))
@with_engine()
def test_reflect_select(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine), autoload=True)
self.assertEqual(len(one_row_complex.c), 15)
self.assertIsInstance(one_row_complex.c.col_string, Column)
rows = one_row_complex.select().execute().fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(
list(rows[0]),
[
True,
127,
32767,
2147483647,
9223372036854775807,
0.5,
0.25,
"a string",
datetime(2017, 1, 1, 0, 0, 0),
date(2017, 1, 2),
b"123",
"[1, 2]",
"{1=2, 3=4}",
"{a=1, b=2}",
Decimal("0.1"),
],
)
self.assertIsInstance(one_row_complex.c.col_boolean.type, BOOLEAN)
self.assertIsInstance(one_row_complex.c.col_tinyint.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_smallint.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_int.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_bigint.type, BIGINT)
self.assertIsInstance(one_row_complex.c.col_float.type, FLOAT)
self.assertIsInstance(one_row_complex.c.col_double.type, FLOAT)
self.assertIsInstance(one_row_complex.c.col_string.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_timestamp.type, TIMESTAMP)
self.assertIsInstance(one_row_complex.c.col_date.type, DATE)
self.assertIsInstance(one_row_complex.c.col_binary.type, BINARY)
self.assertIsInstance(one_row_complex.c.col_array.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_map.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_struct.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_decimal.type, DECIMAL)
@with_engine()
def test_reserved_words(self, engine, conn):
"""Presto uses double quotes, not backticks"""
fake_table = Table(
"select", MetaData(bind=engine), Column("current_timestamp", STRINGTYPE)
)
query = str(fake_table.select(fake_table.c.current_timestamp == "a"))
self.assertIn('"select"', query)
self.assertIn('"current_timestamp"', query)
self.assertNotIn("`select`", query)
self.assertNotIn("`current_timestamp`", query)
@with_engine()
def test_retry_if_data_catalog_exception(self, engine, conn):
dialect = engine.dialect
exc = OperationalError(
"", None, "Database does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError(
"", None, "Namespace does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError(
"", None, "Table does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError("", None, "foobar.")
self.assertTrue(
dialect._retry_if_data_catalog_exception(exc, "foobar", "foobar")
)
exc = ProgrammingError(
"", None, "Database does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
@with_engine()
def test_get_column_type(self, engine, conn):
dialect = engine.dialect
self.assertEqual(dialect._get_column_type("boolean"), "boolean")
self.assertEqual(dialect._get_column_type("tinyint"), "tinyint")
self.assertEqual(dialect._get_column_type("smallint"), "smallint")
self.assertEqual(dialect._get_column_type("integer"), "integer")
self.assertEqual(dialect._get_column_type("bigint"), "bigint")
self.assertEqual(dialect._get_column_type("real"), "real")
self.assertEqual(dialect._get_column_type("double"), "double")
self.assertEqual(dialect._get_column_type("varchar"), "varchar")
self.assertEqual(dialect._get_column_type("timestamp"), "timestamp")
self.assertEqual(dialect._get_column_type("date"), "date")
self.assertEqual(dialect._get_column_type("varbinary"), "varbinary")
self.assertEqual(dialect._get_column_type("array(integer)"), "array")
self.assertEqual(dialect._get_column_type("map(integer, integer)"), "map")
self.assertEqual(dialect._get_column_type("row(a integer, b integer)"), "row")
self.assertEqual(dialect._get_column_type("decimal(10,1)"), "decimal")
@with_engine()
def test_contain_percents_character_query(self, engine, conn):
select = sqlalchemy.sql.text(
"""
SELECT date_parse('20191030', '%Y%m%d')
"""
)
table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()
query = sqlalchemy.select(["*"]).select_from(table_expression)
result = engine.execute(query)
self.assertEqual(result.fetchall(), [(datetime(2019, 10, 30),)])
query_with_limit = (
sqlalchemy.sql.select(["*"]).select_from(table_expression).limit(1)
)
result_with_limit = engine.execute(query_with_limit)
self.assertEqual(result_with_limit.fetchall(), [(datetime(2019, 10, 30),)])
@with_engine()
def test_query_with_parameter(self, engine, conn):
select = sqlalchemy.sql.text(
"""
SELECT :word
"""
)
table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()
query = sqlalchemy.select(["*"]).select_from(table_expression)
result = engine.execute(query, word="cat")
self.assertEqual(result.fetchall(), [("cat",)])
query_with_limit = (
sqlalchemy.select(["*"]).select_from(table_expression).limit(1)
)
result_with_limit = engine.execute(query_with_limit, word="cat")
self.assertEqual(result_with_limit.fetchall(), [("cat",)])
@with_engine()
def test_contain_percents_character_query_with_parameter(self, engine, conn):
select1 = sqlalchemy.sql.text(
"""
SELECT date_parse('20191030', '%Y%m%d'), :word
"""
)
table_expression1 = sqlalchemy.sql.selectable.TextAsFrom(select1, []).cte()
query1 = sqlalchemy.select(["*"]).select_from(table_expression1)
result1 = engine.execute(query1, word="cat")
self.assertEqual(result1.fetchall(), [(datetime(2019, 10, 30), "cat")])
query_with_limit1 = (
sqlalchemy.select(["*"]).select_from(table_expression1).limit(1)
)
result_with_limit1 = engine.execute(query_with_limit1, word="cat")
self.assertEqual(
result_with_limit1.fetchall(), [(datetime(2019, 10, 30), "cat")]
)
select2 = sqlalchemy.sql.text(
"""
SELECT col_string, :param FROM one_row_complex
WHERE col_string LIKE 'a%' OR col_string LIKE :param
"""
)
table_expression2 = sqlalchemy.sql.selectable.TextAsFrom(select2, []).cte()
query2 = sqlalchemy.select(["*"]).select_from(table_expression2)
result2 = engine.execute(query2, param="b%")
self.assertEqual(result2.fetchall(), [("a string", "b%")])
query_with_limit2 = (
sqlalchemy.select(["*"]).select_from(table_expression2).limit(1)
)
result_with_limit2 = engine.execute(query_with_limit2, param="b%")
self.assertEqual(result_with_limit2.fetchall(), [("a string", "b%")])
@with_engine()
def test_nan_checks(self, engine, conn):
dialect = engine.dialect
self.assertFalse(dialect._is_nan("string"))
self.assertFalse(dialect._is_nan(1))
self.assertTrue(dialect._is_nan(float("nan")))
@with_engine()
def test_to_sql(self, engine, conn):
# TODO pyathena.error.OperationalError: SYNTAX_ERROR: line 1:305:
# Column 'foobar' cannot be resolved.
# def _format_bytes(formatter, escaper, val):
# return val.decode()
table_name = "to_sql_{0}".format(str(uuid.uuid4()).replace("-", ""))
df = pd.DataFrame(
{
"col_int": np.int32([1]),
"col_bigint": np.int64([12345]),
"col_float": np.float32([1.0]),
"col_double": np.float64([1.2345]),
"col_string": ["a"],
"col_boolean": np.bool_([True]),
"col_timestamp": [datetime(2020, 1, 1, 0, 0, 0)],
"col_date": [date(2020, 12, 31)],
# "col_binary": "foobar".encode(),
}
)
# Explicitly specify column order
df = df[
[
"col_int",
"col_bigint",
"col_float",
"col_double",
"col_string",
"col_boolean",
"col_timestamp",
"col_date",
# "col_binary",
]
]
df.to_sql(
table_name,
engine,
schema=SCHEMA,
index=False,
if_exists="replace",
method="multi",
)
table = Table(table_name, MetaData(bind=engine), autoload=True)
self.assertEqual(
table.select().execute().fetchall(),
[
(
1,
12345,
1.0,
1.2345,
"a",
True,
datetime(2020, 1, 1, 0, 0, 0),
date(2020, 12, 31),
# "foobar".encode(),
)
],
)
@with_engine(verify="false")
def test_conn_str_verify(self, engine, conn):
kwargs = conn.connection._kwargs
self.assertFalse(kwargs["verify"])
@with_engine(duration_seconds="1800")
def test_conn_str_duration_seconds(self, engine, conn):
kwargs = conn.connection._kwargs
self.assertEqual(kwargs["duration_seconds"], 1800)
@with_engine(poll_interval="5")
def test_conn_str_poll_interval(self, engine, conn):
self.assertEqual(conn.connection.poll_interval, 5)
@with_engine(kill_on_interrupt="false")
def test_conn_str_kill_on_interrupt(self, engine, conn):
self.assertFalse(conn.connection.kill_on_interrupt)
| mit | -7,513,084,211,599,442,000 | 36.568224 | 88 | 0.565501 | false |
rohanpm/qingfanyi | qingfanyi/process/translate.py | 1 | 3333 | # coding=utf-8
# qingfanyi - Chinese to English translation tool
# Copyright (C) 2016 Rohan McGovern <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import traceback
from gi.repository import GLib
from gi.repository import Gtk
from pyatspi import Registry
import qingfanyi.styles
from qingfanyi import debug
from qingfanyi.dict import Dict
from qingfanyi.popup_manager import PopupManager
from qingfanyi.snapshot import Snapshot
from qingfanyi.snapshot_matcher import SnapshotMatcher
from qingfanyi.translate_window import TranslateWindow
from qingfanyi.wm import active_window
class Translate(object):
def __init__(self):
self.dic = Dict()
self.dic.open()
self.condvar = threading.Condition()
self.error = None
def run_in_other_thread(self):
self.condvar.acquire()
GLib.idle_add(self.run_in_this_thread)
self.condvar.wait()
debug('run in other thread done')
if self.error:
raise self.error
def run_in_this_thread(self):
self.condvar.acquire()
self.error = None
try:
self.run()
except Exception as e:
traceback.print_exc()
self.error = e
finally:
self.condvar.notify()
self.condvar.release()
def run(self):
debug('translate running...')
(accessible_window, gdk_window) = active_window()
if not accessible_window:
debug('No active window. Do nothing.')
return
debug('active: %s' % accessible_window)
qingfanyi.styles.init()
debug('taking snapshot')
snapshot = Snapshot(accessible_window, gdk_window)
snapshot_matcher = SnapshotMatcher(snapshot, self.dic)
debug('creating translate window')
translate_win = TranslateWindow(snapshot, snapshot_matcher)
translate_win.show()
snapshot_matcher.start()
PopupManager(translate_win)
# nested loop to make run() blocking
translate_win.connect('hide', lambda *_: Gtk.main_quit())
Gtk.main()
def run_event_loop(self):
debug('starting at-spi loop')
Registry.start(gil=False)
def __del__(self):
debug('closing.')
self.dic.close()
self.dic = None
_INSTANCE = None
def run():
if not _INSTANCE:
raise ValueError('run() called before init()')
_INSTANCE.run_in_other_thread()
debug('run complete')
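# Intended lifecycle (a sketch inferred from the functions below): call init()
# once at startup, which creates the singleton Translate object and starts the
# AT-SPI event loop in a daemon thread; then call run() for each translation
# request. run() blocks until the translate window is hidden again.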
def init():
global _INSTANCE
if _INSTANCE:
raise ValueError('init() called more than once')
_INSTANCE = Translate()
thread = threading.Thread(target=_INSTANCE.run_event_loop, daemon=True)
thread.start()
| gpl-3.0 | -3,197,915,771,804,864,500 | 27.487179 | 75 | 0.660066 | false |
pombredanne/django-fluent-contents | fluent_contents/plugins/oembeditem/fields.py | 1 | 1167 | from django.core.exceptions import ValidationError
from django.db.models import URLField
from django.utils.translation import ugettext_lazy as _
from fluent_contents.plugins.oembeditem import backend
class OEmbedUrlField(URLField):
"""
    URL field which validates that the URL is supported by an OEmbed provider.
    This feature is provided as a model field, so other apps can reuse the same logic.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('help_text', _("Enter the URL of the online content to embed (e.g. a YouTube or Vimeo video, SlideShare presentation, etc..)"))
super(OEmbedUrlField, self).__init__(*args, **kwargs)
def clean(self, *args, **kwargs):
url = super(OEmbedUrlField, self).clean(*args, **kwargs)
if not backend.has_provider_for_url(url):
raise ValidationError(_("The URL is not valid for embedding content")) # or is not configured as provider.
return url
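# A minimal usage sketch (the model below is hypothetical, assuming this
# plugin app is importable in the project):
#
#   from django.db import models
#   from fluent_contents.plugins.oembeditem.fields import OEmbedUrlField
#
#   class EmbedItem(models.Model):
#       embed_url = OEmbedUrlField("Embed URL")
#
# clean() then rejects any URL for which the OEmbed backend has no provider.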
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([], ["^" + __name__.replace(".", "\.") + "\.OEmbedUrlField"])
| apache-2.0 | 4,268,102,189,696,169,000 | 36.645161 | 153 | 0.688946 | false |
fivethreeo/django-dragoman-blog | dragoman_blog/templatetags/dragoman_blog_tags.py | 1 | 1443 | from django import template
register = template.Library()
@register.inclusion_tag('admin/dragoman_blog/submit_line.html', takes_context=True)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
ctx = {
'opts': opts,
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and change and context.get('show_delete', True)),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True
}
if context.get('original') is not None:
ctx['original'] = context['original']
if context.get('translation_language_code') is not None:
ctx['translation_language_code'] = context['translation_language_code']
if context.get('translation_language_field') is not None:
ctx['translation_language_field'] = context['translation_language_field']
return ctx | bsd-3-clause | -6,141,228,518,920,310,000 | 42.757576 | 84 | 0.616078 | false |
nbstr/demineur | functions.py | 1 | 4297 | #=========================================#
# IMPORTS #
#=========================================#
from beautiful import *
from random import randint
#=========================================#
# CONFIG #
#=========================================#
# DIFFICULTY
nb_cases = 9 # !! MAX 26
nb_bombes = 9 # !! MAX nb_cases**2
# SAFETY CHECK
if(nb_bombes >= nb_cases**2):
nb_bombes = nb_cases**2 - 1
# COORDINATES
alpha_maj = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
alpha_num = list(range(1, nb_cases + 1))
#=========================================#
# FUNCTIONS #
#=========================================#
def generer_bombes(hasard=False):
"""
    Generates and places bombs at random.
"""
if(hasard):
grille = {}
while len(grille) < (nb_bombes):
x = randint(0, nb_cases-1)
y = randint(0, nb_cases-1)
grille[(x, y)] = "B"
return grille
else:
grille = {}
grille [(0 ,7)] = "B"
grille [(1 ,5)] = "B"
grille [(1 ,6)] = "B"
grille [(1 ,8)] = "B"
grille [(2 ,4)] = "B"
grille [(3 ,4)] = "B"
grille [(5 ,5)] = "B"
grille [(5 ,7)] = "B"
grille [(7 ,0)] = "B"
grille [(7 ,5)] = "B"
return grille
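# Illustrative note: the returned dict maps each bombed cell to the "B" marker,
# keyed as (x, y) = (0-based column, 0-based row). For instance the fixed
# entry grille[(0, 7)] above is a bomb in column 1, row H of the displayed board.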
def init_champ():
"""
    Initializes the minefield.
"""
champ = []
for i in range(nb_cases):
champ.append(["*"] * nb_cases)
return champ
def print_champ(g):
"""
    Displays the minefield.
"""
print ("\n\n " + " ".join(str(col) for col in alpha_num))
print (" " + "-"*37)
for i, ligne in enumerate(g):
print (alpha_maj[i] + " | " + " | ".join(ligne) + " |\n " + "-"*37)
print ("\n")
def bombe(coord, grille):
"""
    Checks whether there is a bomb at the given coordinates.
"""
for bombe in grille:
if (bombe == coord):
return True;
return False;
def input_coordonnees():
"""
    Asks the player to select a cell.
"""
# VALIDATION Y
while True:
y = input("• Veuillez entrer la lettre d’une ligne: ")
try:
y = int(alpha_maj[:nb_cases].index(y.upper()))
break
except ValueError:
print("!! La lettre de la ligne est invalide\n")
# VALIDATION X
while True:
x = input_int("• Veuillez entrer le numéro d’une colonne: ")
if(x < 1 or x > nb_cases):
print ("!! Le numéro de la colonne est invalide\n")
else:
x -= 1
break
return (x, y)
def compte_bombes(x, y, grille):
"""
    Counts the number of bombs in the surrounding cells.
"""
nombre_bombes = 0
for ligne in range(y-1, y+2):
for colonne in range(x-1, x+2):
            # CHECK THAT WE ARE STILL ON THE MINEFIELD
if(colonne >= 0 and colonne < nb_cases and ligne >= 0 and ligne < nb_cases and (ligne != y or colonne != x)):
if(bombe((colonne, ligne), grille)):
nombre_bombes += 1
else:
return nombre_bombes
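# Brief worked example (using the fixed grid from generer_bombes, for
# illustration only): compte_bombes(0, 6, grille) scans the eight neighbours
# of column 1 / row G and finds the bombs at (0, 7), (1, 5) and (1, 6), so it
# returns 3; the centre cell itself is never counted thanks to the
# (ligne != y or colonne != x) test.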
def afficher_case(champ, x, y, grille):
"""
    Displays the number of adjacent bombs.
"""
nombre_bombes = compte_bombes(x, y, grille)
    # FILL THE CELL
if(nombre_bombes == 0):
champ[y][x] = " "
        # DISPLAY THE ADJACENT CELLS
for l in range(y-1, y+2):
for c in range(x-1, x+2):
                # CHECK THAT WE ARE STILL ON THE MINEFIELD
if(c >= 0 and c < nb_cases and l >= 0 and l < nb_cases and (l != y or c != x) and champ[l][c] == "*"):
sous_compte = compte_bombes(c, l, grille)
if(sous_compte == 0):
champ[l][c] = " "
champ = afficher_case(champ, c, l, grille)
else:
champ[l][c] = str(compte_bombes(c, l, grille))
else:
champ[y][x] = str(nombre_bombes)
return champ
def fin(champ, grille):
count = 0
for ligne in champ:
for element in ligne:
if(element == "*"):
count += 1
return count == len(grille)
| unlicense | -421,806,785,374,238,600 | 24.622754 | 121 | 0.45291 | false |
dbednarski/pyhdust | pyhdust/input.py | 1 | 29469 | # -*- coding:utf-8 -*-
"""
PyHdust *input* module: Hdust input tools.
:co-author: Rodrigo Vieira
:license: GNU GPL v3.0 (https://github.com/danmoser/pyhdust/blob/master/LICENSE)
"""
import os as _os
import numpy as _np
from glob import glob as _glob
from itertools import product as _product
import pyhdust.phc as _phc
import pyhdust as _hdt
__author__ = "Daniel Moser"
__email__ = "[email protected]"
def makeDiskGrid(modn='01', mhvals=[1.5], hvals=[.6], rdvals=[18.6], mvals=None,
sig0vals=None, doFVDD=False, sBdays=None, sBfiles=None, selsources='*',
alpha=.5, mu=.5, R0r=300, Mdot11=False, path=None):
"""
| ###CONFIG. OPTIONS
| #MODEL NUMBER
| modn = '02'
| #The following filter will be applied to the SOURCE selection (string fmt)
| selsources = '*'
|
| #SUPERFICIAL DENSITY PROFILE EXPONENT
| mvals = [1.5,2.0,2.5,3.0]
| #VERTICAL DENSITY PROFILE EXPONENT
| mhvals = [1.5]
| #FRACTION OF TEFF OF PRIMARY STAR
    | #This parameter sets whether it will be FIXED to the OB=1.1 case
| hvals = [72.]
| #DISK RADIUS EQUATORIAL...
| rdvals = [30.]
| #SIGMA_0 VALUES
| sig0vals = _np.logspace(_np.log10(0.02),_np.log10(4.0),7)
|
| #Do the Full VDD model for the corresponding sig0?
| doFVDD = True
| alpha = 0.5
| mu = 0.5
| #WARNING: it only generates a single R0 value per batch. If you want to change
| # it, run it twice (or more)
| R0r = 300
| ###END CONFIG.
"""
G = _phc.G.cgs
Msun = _phc.Msun.cgs
Rsun = _phc.Rsun.cgs
kB = _phc.kB.cgs
mH = _phc.mH.cgs
yr = _phc.yr.cgs
def doPL(prodI):
'''
Given a prodI (i.e., src,sig0,rd,h,m,mh), generates the Power-Law model
input
'''
src,sig0,rd,h,m,mh = prodI
M,Req,Tp = _hdt.readscr(src)
Mstr = str(M)
M *= Msun
Req *= Rsun
Th = h*Tp/100.
#a0 = (kB*h/100.*Tp/mu/mH)**.5
a = (kB*Th/mu/mH)**.5
n0 = (G*M/2./_np.pi)**.5*sig0/mu/mH/a/Req**1.5
#Th = a**2*mu*mH/kB
srcname = src.replace('source/','').replace('.txt','')
suffix = '_PLn{0:.1f}_sig{1:.2f}_h{2:03.0f}_Rd{3:05.1f}_{4}'.format(\
(m+mh),sig0,h,rd,srcname)
wmod = mod[:]
wmod[13]=wmod[13].replace('18.6',('%.2f' % rd))
wmod[20]=wmod[20].replace('2.0',('%.2f' % m))
wmod[33]=wmod[33].replace('1.5',('%.2f' % mh))
wmod[40]=wmod[40].replace('18000.',('%.1f' % Th))
wmod[52]=wmod[52].replace('2.35E13',('%.2e' % n0))
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
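    # Note for readers (an explanatory aside, not a change in behaviour): in
    # doPL above, `a` is the isothermal sound speed sqrt(kB*Th/(mu*mH)) at the
    # disk temperature Th = (h/100)*Tp, and `n0` appears to convert the base
    # surface density sig0 into an equatorial number density assuming the
    # usual Gaussian vertical structure with scale height H = a*r**1.5/sqrt(G*M),
    # i.e. n0 = sqrt(G*M/(2*pi)) * sig0 / (mu*mH*a*Req**1.5).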
def doMdot(prodI):
'''
Given a prodI (i.e., src,sig0,rd,h,m,mh), generates the full VDD model
input
'''
src,sig0,rd,h,m,mh = prodI
M,Req,Tp = _hdt.readscr(src)
Mstr = str(M)
M *= Msun
Req *= Rsun
Th = h*Tp/100.
a = (kB*Th/mu/mH)**.5
#a0 = (kB*h/100*Tp/mu/mH)**.5
#a = a0*Req0*Req**.25/Req/Req**.25
R0 = R0r*Req
        Mdot = sig0*Req**2*3*_np.pi*alpha*a**2/(G*M*R0)**.5  # cgs units (g/s)
Mdot = Mdot/Msun*yr
#Th = a**2*mu*mH/kB
srcname = src.replace('source/','').replace('.txt','')
#suffix = '_NI_Mdot{:.1e}_Rd{:.1f}_R0{:.1f}_alp{:.1f}_h{:.1f}_{}'.\
#format(Mdot,rd,R0/Req,alpha,h,srcname)
suffix = '_NIa{0:.1f}_sig{1:.2f}_h{2:03.0f}_Rd{3:05.1f}_{4}'.format(\
alpha,sig0,h,rd,srcname)
wmod = mod[:]
wmod[13]=wmod[13].replace('18.6',('%.2f' % rd))
wmod[18]=wmod[18].replace('1',('%d' % 2))
wmod[23]=wmod[23].replace('1.',('%.2f' % alpha))
wmod[24]=wmod[24].replace('= 0.',('= %.2f' % (R0/Req)))
wmod[25]=wmod[25].replace('= 0',('= %d' % 1))
wmod[31]=wmod[31].replace('0',('%d' % 1))
wmod[40]=wmod[40].replace('18000.','{0:.1f}'.format(Th))
wmod[49]=wmod[49].replace('2',('%d' % 3))
wmod[55]=wmod[55].replace('1.E-9',('%.2e' % Mdot))
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
def doSB(prodI, hseq=False):
'''
Given a prodI (i.e., sources,rdvals,hvals,mhvals,sBdays,sBfiles),
generates the Single Be based model input
'''
src,rd,h,mh,day,sfile = prodI
M,Req,Tp = _hdt.readscr(src)
Mstr = str(M)
M *= Msun
Req *= Rsun
Th = h*Tp/100.
#a0 = (kB*h/100.*Tp/mu/mH)**.5
a = (kB*Th/mu/mH)**.5
#~ n0 = (G*M/2./_np.pi)**.5*sig0/mu/mH/a/Req**1.5
#Th = a**2*mu*mH/kB
srcname = src.replace('source/','').replace('.txt','')
wmod = mod[:]
wmod[13]=wmod[13].replace('18.6',('%.2f' % rd))
wmod[18]=wmod[18].replace('= 1','= 4')
wmod[28]=wmod[28].replace('deltasco/Atsuo/1D/data/dSco_a035_01',(sfile))
wmod[29]=wmod[29].replace('2.3',('%.2f' % (day/365.25)))
if not hseq:
wmod[33]=wmod[33].replace('1.5',('%.2f' % mh))
suffix = '_SB{0}_{1:.1f}d_h{2:03.0f}_Rd{3:05.1f}_{4}'.format(\
_phc.trimpathname(sfile)[1],day,h,rd,srcname)
else:
wmod[31]=wmod[31].replace('= 0','= 1')
wmod[36]=wmod[36].replace('1.5',('%.2f' % mh))
suffix = '_SB{0}_{1:.1f}d_hseq_Rd{2:05.1f}_{3}'.format(\
_phc.trimpathname(sfile)[1],day,rd,srcname)
wmod[40]=wmod[40].replace('18000.',('%.1f' % Th))
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
###TODO Setup Tpole = REF of a (scale height)
#Tps = dict(zip(Ms, Tp11))
###PROGRAM BEGINS
path0 = _os.getcwd()
if path != None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
#Check modN folder
if not _os.path.exists('mod{}'.format(modn)):
_os.system('mkdir mod{}'.format(modn))
#Select sources
sources = _glob('source/'+selsources)
#Load disk model
f0 = open('{0}/refs/REF_disco.txt'.format(_hdt.hdtpath()))
mod = f0.readlines()
f0.close()
if sBdays is None or sBfiles is None:
for prodI in _product(sources,sig0vals,rdvals,hvals,mvals,mhvals):
doPL(prodI)
i = 0
if doFVDD:
i = 1
doMdot(prodI)
print('# {0:.0f} arquivos foram gerados !!'.format(len(sources)*\
len(sig0vals)*len(rdvals)*len(hvals)*(len(mvals)+i)*len(mhvals)))
else:
for prodI in _product(sources,rdvals,hvals,mhvals,sBdays,sBfiles):
doSB(prodI)
i = 0
if doFVDD:
i = 1
doSB(prodI, hseq=True)
print('# {0:.0f} arquivos foram gerados !!'.format(len(sources)*\
len(rdvals)*len(hvals)*len(sBdays)*(len(mhvals)+i)*len(sBfiles)))
    if path != '':
_os.chdir(path0)
###END PROGRAM
return
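# A minimal usage sketch (parameter values are placeholders, not a
# recommendation; the 'source/' folder must already hold the stellar source
# .txt files):
#
#   import pyhdust.input as inp
#   inp.makeDiskGrid(modn='02', mvals=[2.0, 3.0], sig0vals=[0.05, 0.5],
#                    rdvals=[30.], hvals=[72.], doFVDD=True,
#                    path='/path/to/project')
#
# One mod02/mod02_*.txt disk description is written per parameter combination.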
def makeInpJob(modn='01', nodes=512, simulations=['SED'],
docases=[1,3], sim1=['step1'], sim2=['step1_ref'], composition=['pureH'],
controls=['controls'], gridcells=['grid'], observers=['observers'],
images=[''], clusters=['job'], srcid='',
walltime='24:00:00', wcheck=False, email='$USER@localhost', chkout=False,
st1max=20, st1refmax=24, ctrM=False, touch=False, srcNf=None, path=None):
"""
Create INP+JOB files to run Hdust.
    All SOURCE files must start with "Be_". Otherwise, `makeInpJob` will
    not work. This is to satisfy the criterion of a specific disk model for
    each source star.
| ### Start edit here ###
| modn = '02'
|
| #clusters config
| # job = AlphaCrucis; oar = MesoCentre Licallo; ge = MesoCentre FRIPP
| clusters = ['job','oar','ge','bgp']
| clusters = ['oar']
| nodes = 48
| #if wcheck == True, walltime will be AUTOMATICALLY estimated
| walltime = '3:00:00'
| wcheck = True
| email = '[email protected]'
|
| #Check if the outputs already exist
| chkout = True
| #Above the values below, the step1 will be considered done!
| st1max = 26
| st1refmax = 30
    | #Generates inp+job only for the source with '1.45' in its name
    | #Special characters such as * or ? do not work
    | srcid = '1.45'
    | srcid = ''
    | #If one of the 3 cases is not present, a commented-out input is generated.
    | docases = [1,2,3]
    | #1 = step1 <> generates inp+job only for mod#/mod#.txt (WITHOUT source, disk only)
    | #enables ADDSUFFIX; removes OBSERVERS and IMAGES
    | sim1 = 'step1'
    | #2 = step1_refine
    | sim2 = 'step1_refine'
    | #3 = others <> generates inp+job only for mod#/mod#SOURCE.txt (post-processing)
    | #removes ADDSUFFIX; adds OBSERVERS (and maybe IMAGES)
| simulations = ['sed','h','brg','halpha','uv']
| simulations = ['sed_sig','brg_M','halpha_M','uv','j','h','k','l','m','n','q1','q2']
| simulations = ['SED','Ha']
| images = ['','h','brg','halpha','uv']
| images = simulations[:]
| composition = 'pureH'
| controls = 'no_op'
| controls = 'controls'
| ctrM = False
| gridcells = 'grid'
| observers = 'obs'
| touch = True
| ###stop edition here
"""
def isFloat(x):
try:
a = float(x)
except ValueError:
return False
else:
return True
def doCase1(inp,cases):
case1 = inp[:]
case1[0] = case1[0].replace('suffix',suf)
case1[1] = case1[1].replace('pureH',composition)
if ctrM:
i = suf.find('_M')
M = suf[i:i+7]
case1[2] = case1[2].replace('controls',controls+M)
else:
case1[2] = case1[2].replace('controls',controls)
case1[3] = case1[3].replace('grid',gridcells)
case1[4] = case1[4].replace('step1',sim1)
case1[5] = case1[5].replace('source',src)
if 1 not in cases:
for i in range(len(case1)):
case1[i] = '!~ '+case1[i]
return case1
def doCase2(inp,cases):
case1 = inp[:]
case1[0] = case1[0].replace('suffix',suf)
case1[1] = case1[1].replace('pureH',composition)
if ctrM:
i = suf.find('_M')
M = suf[i:i+7]
case1[2] = case1[2].replace('controls',controls+M)
else:
case1[2] = case1[2].replace('controls',controls)
case1[3] = case1[3].replace('grid',gridcells)
case1[4] = case1[4].replace('step1',sim2)
case1[5] = case1[5].replace('source',src)
if 2 not in cases:
for i in range(len(case1)):
case1[i] = '!~ '+case1[i]
return case1
def doCase3(inp,simchk):
case3 = []
for i in range(len(simulations)):
case1 = inp[:]
case1[0] = case1[0].replace('suffix',suf)
case1[1] = case1[1].replace('pureH',composition)
if ctrM:
j = suf.find('_M')
M = suf[j:j+7]
case1[2] = case1[2].replace('controls',controls+M)
else:
case1[2] = case1[2].replace('controls',controls)
case1[3] = case1[3].replace('grid',gridcells)
case1[5] = case1[5].replace('source',src)
if simulations[i] == 'SED':
sig = suf[suf.find('_sig')+4:suf.find('_sig')+8]
if isFloat(sig) and srcNf[i]:
case1[4] = case1[4].replace('step1','SED_sig{0}'.format(sig))
else:
case1[4] = case1[4].replace('step1',simulations[i])
elif srcNf[i]:
case1[4] = case1[4].replace('step1','{0}_{1}'.format(\
simulations[i],src))
else:
case1[4] = case1[4].replace('step1',simulations[i])
case1.append("OBSERVERS = '{0}'\n".format(observers))
if images[i] != '':
case1.append("IMAGES = '{0}'\n".format(images[i]))
case1.append('\n')
if not simchk[i]:
for i in range(len(case1)):
case1[i] = '!~ '+case1[i]
case3 += case1
return case3
def doJobs(mod, sel, nodes, addtouch='\n'):
#load Ref
f0 = open('{0}/refs/REF.{1}'.format(_hdt.hdtpath(),sel))
wout = f0.readlines()
f0.close()
outname = mod[mod.find('/')+1:].replace('txt',sel)
f0 = open('{0}s/{0}s_{1}_mod{2}.sh'.format(sel,proj,modn),'a')
if sel == 'job':
wout[4] = wout[4].replace('128','{0}'.format(nodes))
wout[4] = wout[4].replace('36:00:00','{0}'.format(walltime))
wout[8] = wout[8].replace('[email protected]','{0}'.format(email))
wout[11] = wout[11].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
if touch:
wout[24] = addtouch
modchmod = _phc.trimpathname(mod)
modchmod[1] = modchmod[1].replace('.txt','*')
wout[31] = 'chmod 664 {0}/{1}/*{2}\nchmod 664 log/*\nchmod 664 ../../tmp/*\n'.\
format(proj, *modchmod)
f0.writelines('qsub {0}/{1}s/{2}\n'.format(proj,sel,outname))
elif sel == 'oar':
wout[2] = wout[2].replace('12','{0}'.format(nodes))
wout[2] = wout[2].replace('24:0:0','{0}'.format(walltime))
wout[10] = wout[10].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
f0.writelines('chmod a+x {0}/{1}s/{2}\n'.format(proj,sel,outname))
f0.writelines('oarsub -S ./{0}/{1}s/{2}\n'.format(proj,sel,outname))
elif sel == 'ge':
wout[3] = wout[3].replace('48','{0}'.format(nodes))
wout[4] = wout[4].replace('45:00:00','{0}'.format(walltime))
wout[7] = wout[7].replace('[email protected]','{0}'.format(email))
wout[11] = wout[11].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
f0.writelines('qsub -P hdust {0}/{1}s/{2}\n'.format(proj,sel,outname))
elif sel == 'bgp':
wout[14] = wout[14].replace('512','{0}'.format(nodes))
nodes = int(nodes)
if nodes%512 != 0:
nrsv = (nodes//512+1)*128
else:
nrsv = (nodes//512)*128
wout[10] = wout[10].replace('128','{0}'.format(nrsv))
wout[4] = wout[4].replace('24:00:00','{0}'.format(walltime))
wout[14] = wout[14].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
f0.writelines('chmod +x {0}/{1}s/{2}\n'.format(proj,sel,outname))
f0.writelines('llsubmit ./{0}/{1}s/{2}\n'.format(proj,sel,outname))
f0.close()
f0 = open('{0}s/{1}'.format(sel,outname),'w')
f0.writelines(wout)
print('# Saved: {0}s/{1}'.format(sel,outname))
f0.close()
return
#PROGRAM START
if srcNf is None:
srcNf = len(simulations)*[False]
path0 = _os.getcwd()
if path is not None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
#obtain the current directory
proj = _os.getcwd()
proj = proj[proj.rfind('/')+1:]
#Folder's checks
for sel in clusters:
if not _os.path.exists('{0}s'.format(sel)):
_os.system('mkdir {0}s'.format(sel))
elif _os.path.exists('{0}s/{0}s_{1}_mod{2}.sh'.format(sel,proj,modn)):
_os.system('rm {0}s/{0}s_{1}_mod{2}.sh'.format(sel,proj,modn))
#list of mods
mods = _glob('mod{0}/mod{0}*.txt'.format(modn))
#load REF_inp
f0 = open('{0}/refs/REF_inp.txt'.format(_hdt.hdtpath()))
inp = f0.readlines()
f0.close()
for mod in mods:
#Write inps
f0 = open(mod.replace('.txt','.inp'),'w')
f0.writelines('PROJECT = {0}\nMODEL = {1}\n\n'.format(proj,modn))
suf = mod[mod.find('_'):-4]
src = mod[mod.find('Be_'):-4]
if src.find(srcid) == -1:
continue
cases = docases[:]
#Do the touch thing
addtouch = '\n'
addtouch += 'chmod 664 ../../tmp/*\nchmod 664 {0}/mod{1}/*\n'.format(proj,modn)
if touch and ( (1 in cases) or (2 in cases) ):
addtouch += 'touch {0}/{1}\n'.format(proj, mod.replace('.txt','.log'))
if touch and 3 in cases:
for sim in simulations:
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','.chk')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','.err')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','.log')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','_SEI.chk')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','_SEI.err')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','_SEI.log')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
err90a = '{0}/{1}'.format(proj,mod.replace('.txt','.err').replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim)))
err90b = '{0}/{1}'.format(proj,mod.replace('.txt','_SEI.err').replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim)))
addtouch += 'touch {0}\n'.format(err90a[:90])
addtouch += 'touch {0}\n'.format(err90b[:90])
addtouch += 'touch {0}\n'.format(err90a[:90].replace(".err",".chk").replace(".er",".ch").replace(".e",".c"))
addtouch += 'touch {0}\n'.format(err90b[:90].replace(".err",".chk").replace(".er",".ch").replace(".e",".c"))
modchmod = _phc.trimpathname(mod)
modchmod[1] = modchmod[1].replace('.txt','*')
#~ addtouch += 'chmod 664 {0}/{1}/*{2}\n'.format(proj, *modchmod)
#Set simulation check variable
if 3 in cases:
simchk = _np.ones(len(simulations), dtype=bool)
else:
simchk = _np.zeros(len(simulations), dtype=bool)
if _os.path.exists(mod.replace('.txt','{0:02d}.temp'.format(st1max))) \
and chkout and 1 in cases:
cases.remove(1)
case1 = doCase1(inp,cases)
f0.writelines(case1+['\n'])
if _os.path.exists(mod.replace('.txt','{0:02d}.temp'.format(st1refmax)))\
and chkout and 2 in cases:
cases.remove(2)
case2 = doCase2(inp,cases)
f0.writelines(case2+['\n'])
if chkout and 3 in cases:
for i in range(len(simulations)):
outs2a = 'mod{0}/{1}_mod{0}{2}.sed2'.format(modn,simulations[i],suf)
outs2b = 'mod{0}/{1}_mod{0}{2}_SEI.sed2'.format(modn,simulations[i],suf)
if _os.path.exists(outs2a) or _os.path.exists(outs2b):
simchk[i] = False
if True not in simchk:
cases.remove(3)
case3 = doCase3(inp,simchk)
f0.writelines(case3)
f0.close()
#Def automatic walltime:
if wcheck:
h = 0
if 1 in cases:
h+=1
if 2 in cases:
h+=1
idx = _np.where(simchk==True)
if len(idx[0])>0:
extra = 4+len(idx[0])
h = h+extra*48/nodes
walltime = '{0}:0:0'.format(h)
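#Illustrative estimate (numbers assumed, not from the code): with nodes=48,
#cases 1 and 2 pending (h=2) and 3 simulations pending (extra=4+3=7),
#h = 2 + 7*48/48 = 9, so walltime becomes '9:0:0'.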
#Del old jobs
for sel in clusters:
outname = mod[mod.find('/')+1:].replace('txt',sel)
if _os.path.exists('{0}s/{1}'.format(sel,outname)):
_os.system('rm {0}s/{1}'.format(sel,outname))
#Write jobs (if necessary)
if len(cases)>0:
for sel in clusters:
doJobs(mod,sel,nodes,addtouch)
if path != '':
_os.chdir(path0)
#PROGRAM END
return
def makeNoDiskGrid(modn, selsources, path=None):
"""
#Create a model list with random disk parameters ("noCS" in filename)
INPUT: modn = '01'; selsources = '*' (filter that is applied to the SOURCE
selection).
OUTPUT: Files written
"""
def doNoCS(src):
'''
Given a src, generates the noCS model input
'''
srcname = src.replace('source/','').replace('.txt','')
suffix = '_noCS_{}'.format(srcname)
wmod = mod[:]
#Removing the disk does not work:
#wmod[9]=wmod[9].replace('1','0')
wmod[13]=wmod[13].replace('18.6','2.0')
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
###PROGRAM BEGINS
path0 = _os.getcwd()
if path is not None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
#Check modN folder
if not _os.path.exists('mod{}'.format(modn)):
_os.system('mkdir mod{}'.format(modn))
#Select sources
sources = _glob('source/'+selsources)
#Load disk model
f0 = open('{0}/refs/REF_disco.txt'.format(_hdt.hdtpath()))
mod = f0.readlines()
f0.close()
for prodI in _product(sources):
prodI = prodI[0]
doNoCS(prodI)
print('# {:.0f} files were generated !!'.format(len(sources)))
if path != "":
_os.chdir(path0)
###END PROGRAM
return
def makeSimulLine(vrots, basesims, Rs, hwidth, Ms, Obs, suffix):
"""
| vrots = [[167.023,229.187,271.072,301.299,313.702],
| [177.998,244.636,290.596,324.272,338.298],
| [192.612,267.017,318.288,355.320,370.638],
| [202.059,281.667,335.158,373.716,389.782],
| [209.244,292.409,358.626,410.439,430.844],
| [214.407,297.661,357.799,402.628,420.683]]
| vrots = [[259.759,354.834,417.792,464.549,483.847],
| [252.050,346.163,406.388,449.818,468.126],
| [245.127,336.834,399.983,448.076,467.806],
| [239.522,329.496,388.734,432.532,450.806],
| [234.301,321.139,379.297,423.241,441.122],
| [228.538,313.797,370.343,412.488,429.914],
| [219.126,299.656,354.547,395.821,413.008],
| [211.544,288.840,341.081,380.426,396.978],
| [203.438,279.328,328.666,365.697,380.660],
| [197.823,268.964,316.901,353.568,368.506],
| [192.620,262.688,308.208,341.963,356.410],
| [187.003,255.125,299.737,332.511,346.043]]
|
| basesims = ['simulation/Brg.txt','simulation/Ha.txt']
| Rs = [12000, 20000]
|
| Ms = [4.00,5.00,7.00,9.00,12.00,15.00]
| Ms = [14.6, 12.5, 10.8, 9.6, 8.6, 7.7, 6.4, 5.5, 4.8, 4.2, 3.8, 3.4]
| Obs = [1.1,1.2,1.3,1.4,1.45]
| suffix = 'H0.30_Z0.014_bE_Ell'
"""
c = _phc.c.cgs
for prodI in _product(Ms,Obs,basesims):
M,Ob,basesim = prodI
f0 = open(basesim)
mod = f0.readlines()
f0.close()
srcid = 'Be_M{0:05.2f}_ob{1:.2f}'.format(M,Ob)
i = Ms.index(M)
j = Obs.index(Ob)
k = basesims.index(basesim)
R = Rs[k]
nmod = mod[:]
vel = '{0:.1f}'.format(hwidth+vrots[i][j])
nmod[103] = nmod[103].replace('1020.',vel)
n = str(int(round(2*(hwidth+vrots[i][j])*R/c*1e5)))
print(srcid, n)
nmod[100] = nmod[100].replace('100',n)
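#Illustrative bin count (values assumed): for hwidth=1000 km/s, vrot~300 km/s
#and R=12000, n = round(2*(1000+300)*1e5*12000/c) ~ 104 spectral bins.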
f0 = open(basesim.replace('.txt','_{0}_{1}.txt'.format(srcid, suffix)),'w')
f0.writelines(nmod)
f0.close()
return
def makeStarGrid(oblats, Hfs, path=None):
"""
| INPUT: oblats = [1.1,1.2,1.3,1.4,1.45] (example)
| Hfs = [0.3] (example)
The masses list and Z value are inside the `geneve_par.pro` file.
"""
path0 = _os.getcwd()
if path is not None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
if not _os.path.exists('stmodels'):
_os.system('mkdir stmodels')
try:
runIDL = True
import pidly
except ImportError:
print('# This system does not have pIDLy installed...')
runIDL = False
if runIDL:
key = raw_input('# Do you want to run "geneve_par" (y/other):')
if key != 'y':
runIDL = False
if runIDL:
import pidly
idl = pidly.IDL()
propath = _hdt.hdtpath()+'/refs/'
idl('cd,"{0}"'.format(propath))
idl('.r geneve_par')
for ob in oblats:
for H in Hfs:
idl('geneve_par, {}, {}, /oblat,/makeeps'.format(ob,H))
_os.system('mv {}/geneve_lum.eps stmodels/geneve_lum_{:.2f}_{:.2f}.eps'.format(propath,ob,H))
_os.system('mv {}/geneve_rp.eps stmodels/geneve_rp_{:.2f}_{:.2f}.eps'.format(propath,ob,H))
_os.system('mv {}/geneve_par.txt stmodels/oblat{}_h{}.txt'.format(propath,ob,H))
idl.close()
f0 = open('{0}/refs/REF_estrela.txt'.format(_hdt.hdtpath()))
mod = f0.readlines()
f0.close()
if not _os.path.exists('source'):
_os.system('mkdir source')
for ob in oblats:
for H in Hfs:
f0 = open('stmodels/oblat{}_h{}.txt'.format(ob,H))
matriz = f0.readlines()
f0.close()
Omega,W,Beta = map(float, matriz[1].split())
m2 = []
for i in range(4,len(matriz)):
if len(matriz[i])>1:
m2 += [matriz[i].split()[1:]]
matriz = _np.array(m2, dtype=float)
M = matriz[:,0] #MASS (SOLAR MASSES)
M = list(M)
Rp = matriz[:,1] #POLAR RADIUS (SOLAR RADII)
Rp = list(Rp)
L = matriz[:,2] #LUMINOSITY (in solar lum.)
L = list(L)
Z = [0.014] #METALLICITY(=Zsolar)
#(other options: 0.006, 0.002)
print('Omega = ', Omega); print('W = ', W); print('beta = ', Beta)
print('M = ', M); print('Rp = ', Rp); print('L = ', L)
print("%.0f files generated\n" % (len(M)*len(Hfs)))
#DEFINE ALL INDEX
for MI in M:
a = M.index(MI)
Raio = Rp[a]
Lum = L[a]
for RpI in Rp:
b = Rp.index(RpI)
for LI in L:
d = L.index(LI)
for ZI in Z:
g = Z.index(ZI)
suffix = '_M{:05.2f}_ob{:.2f}_H{:.2f}_Z{}_bE_Ell'. \
format(MI,ob,H,ZI,Beta,RpI,LI)
#RECORD THE VALUES
wmod = mod[:]
wmod[3]=wmod[3].replace('10.3065',('%.2f' % MI))
wmod[4]=wmod[4].replace('5.38462',('%.2f' % Raio))
wmod[5]=wmod[5].replace('0.775',('%.4f' % W))
wmod[6]=wmod[6].replace('7500.',('%.2f' % Lum))
wmod[7]=wmod[7].replace('0.25',('%.5f' % Beta))
f0=open('source/Be'+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
#
if path != "":
_os.chdir(path0)
return
def makeSimulDens(dbase, basesim):
"""
Sets the number of photons for the SED simulations so that the signal/noise
level is approximately constant for visible polarization.
|dbase = _np.logspace(_np.log10(0.02),_np.log10(4.0),7)
|basesim = 'simulation/sed.txt'
"""
f0 = open(basesim)
mod = f0.readlines()
f0.close()
#fact = 2. Execution time = d/1e13*fact
#Nf0 = 500000000
for d in dbase:
srcid = 'sig{0:.2f}'.format(d)
#alpha = .39794
#beta = 13.87219
alpha = 0.34588
beta = 8.50927
newd = int(10**(-alpha*_np.log10(d)+beta))
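#Illustrative value (assumed, not printed by the code): for d=0.02,
#newd = int(10**(-0.34588*log10(0.02)+8.50927)) ~ 1.25e9 photons.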
print('{}, N_f = {:.2f}e+9'.format(srcid, newd/1e9))
nmod = mod[:]
nmod[9]=nmod[9].replace('500000000','{}'.format(newd))
f0 = open(basesim.replace('.txt','_{}.txt'.format(srcid)),'w')
f0.writelines(nmod)
f0.close()
#a = raw_input('asdads')
return
### MAIN ###
if __name__ == "__main__":
pass
| gpl-3.0 | -8,789,998,822,184,297,000 | 36.208333 | 166 | 0.488683 | false |
MyPureCloud/kafka | release.py | 1 | 24941 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility for creating release candidates and promoting release candidates to a final release.
Usage: release.py
The utility is interactive; you will be prompted for basic release information and guided through the process.
This utility assumes you already have a local kafka git folder and that you
have added remotes corresponding to both:
(i) the github apache kafka mirror and
(ii) the apache kafka git repo.
"""
from __future__ import print_function
import datetime
from getpass import getpass
import json
import os
import subprocess
import sys
import tempfile
PROJECT_NAME = "kafka"
CAPITALIZED_PROJECT_NAME = "kafka".upper()
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# Location of the local git repository
REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, SCRIPT_DIR)
# Remote name, which points to Github by default
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache-github")
PREFS_FILE = os.path.join(SCRIPT_DIR, '.release-settings.json')
delete_gitrefs = False
work_dir = None
def fail(msg):
if work_dir:
cmd("Cleaning up work directory", "rm -rf %s" % work_dir)
if delete_gitrefs:
try:
cmd("Resetting repository working state to branch %s" % starting_branch, "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
cmd("Deleting git tag %s" %rc_tag , "git tag -d %s" % rc_tag, shell=True)
except subprocess.CalledProcessError:
print("Failed when trying to clean up git references added by this script. You may need to clean up branches/tags yourself before retrying.")
print("Expected git branch: " + release_version)
print("Expected git tag: " + rc_tag)
print(msg)
sys.exit(1)
def print_output(output):
if output is None or len(output) == 0:
return
for line in output.split('\n'):
print(">", line)
def cmd(action, cmd, *args, **kwargs):
if isinstance(cmd, basestring) and not kwargs.get("shell", False):
cmd = cmd.split()
allow_failure = kwargs.pop("allow_failure", False)
stdin_log = ""
if "stdin" in kwargs and isinstance(kwargs["stdin"], basestring):
stdin_log = "--> " + kwargs["stdin"]
stdin = tempfile.TemporaryFile()
stdin.write(kwargs["stdin"])
stdin.seek(0)
kwargs["stdin"] = stdin
print(action, cmd, stdin_log)
try:
output = subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
print_output(output)
except subprocess.CalledProcessError as e:
print_output(e.output)
if allow_failure:
return
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
def cmd_output(cmd, *args, **kwargs):
if isinstance(cmd, basestring):
cmd = cmd.split()
return subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
def replace(path, pattern, replacement):
updated = []
with open(path, 'r') as f:
for line in f:
updated.append((replacement + '\n') if line.startswith(pattern) else line)
with open(path, 'w') as f:
for line in updated:
f.write(line)
def user_ok(msg):
ok = raw_input(msg)
return ok.lower() == 'y'
def sftp_mkdir(dir):
basedir, dirname = os.path.split(dir)
if not basedir:
basedir = "."
try:
cmd_str = """
cd %s
mkdir %s
""" % (basedir, dirname)
cmd("Creating '%s' in '%s' in your Apache home directory if it does not exist (errors are ok if the directory already exists)" % (dirname, basedir), "sftp -b - %[email protected]" % apache_id, stdin=cmd_str, allow_failure=True)
except subprocess.CalledProcessError:
# This is ok. The command fails if the directory already exists
pass
def get_pref(prefs, name, request_fn):
"Get a preference from existing preference dictionary or invoke a function that can collect it from the user"
val = prefs.get(name)
if not val:
val = request_fn()
prefs[name] = val
return val
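# Example (mirrors the call made further below in this script):
# apache_id = get_pref(prefs, 'apache_id', lambda: raw_input("Enter your apache username: "))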
# Load saved preferences
prefs = {}
if os.path.exists(PREFS_FILE):
with open(PREFS_FILE, 'r') as prefs_fp:
prefs = json.load(prefs_fp)
if not user_ok("""Requirements:
1. Updated docs to reference the new release version where appropriate.
2. JDK7 and JDK8 compilers and libraries
3. Your Apache ID, already configured with SSH keys on id.apache.org and SSH keys available in this shell session
4. All issues in the target release resolved with valid resolutions (if not, this script will report the problematic JIRAs)
5. A GPG key used for signing the release. This key should have been added to public Apache servers and the KEYS file on the Kafka site
6. Standard toolset installed -- git, gpg, gradle, sftp, etc.
7. ~/.gradle/gradle.properties configured with the signing properties described in the release process wiki, i.e.
mavenUrl=https://repository.apache.org/service/local/staging/deploy/maven2
mavenUsername=your-apache-id
mavenPassword=your-apache-passwd
signing.keyId=your-gpgkeyId
signing.password=your-gpg-passphrase
signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist anymore, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg")
8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e.,
<server>
<id>apache.releases.https</id>
<username>your-apache-id</username>
<password>your-apache-passwd</password>
</server>
<server>
<id>your-gpgkeyId</id>
<passphrase>your-gpg-passphase</passphrase>
</server>
<profile>
<id>gpg-signing</id>
<properties>
<gpg.keyname>your-gpgkeyId</gpg.keyname>
<gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
</properties>
</profile>
9. You may also need to update some gnupg configs:
~/.gnupg/gpg-agent.conf
allow-loopback-pinentry
~/.gnupg/gpg.conf
use-agent
pinentry-mode loopback
echo RELOADAGENT | gpg-connect-agent
If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up.
Some of these may be used from these previous settings loaded from %s:
%s
Do you have all of these set up? (y/n): """ % (PREFS_FILE, json.dumps(prefs, indent=2))):
fail("Please try again once you have all the prerequisites ready.")
starting_branch = cmd_output('git rev-parse --abbrev-ref HEAD')
cmd("Verifying that you have no unstaged git changes", 'git diff --exit-code --quiet')
cmd("Verifying that you have no staged git changes", 'git diff --cached --exit-code --quiet')
release_version = raw_input("Release version (without any RC info, e.g. 1.0.0): ")
try:
release_version_parts = release_version.split('.')
if len(release_version_parts) != 3:
fail("Invalid release version, should have 3 version number components")
# Validate each part is a number
[int(x) for x in release_version_parts]
except ValueError:
fail("Invalid release version, should be a dotted version number")
rc = raw_input("Release candidate number: ")
dev_branch = '.'.join(release_version_parts[:2])
docs_version = ''.join(release_version_parts[:2])
# Validate that the release doesn't already exist and that the version hasn't already been tagged
cmd("Fetching tags from upstream", 'git fetch --tags %s' % PUSH_REMOTE_NAME)
tags = cmd_output('git tag').split()
if release_version in tags:
fail("The specified version has already been tagged and released.")
# TODO promotion
if not rc:
fail("Automatic Promotion is not yet supported.")
# Find the latest RC and make sure they want to promote that one
rc_tag = sorted([t for t in tags if t.startswith(release_version + '-rc')])[-1]
if not user_ok("Found %s as latest RC for this release. Is this correct? (y/n): "):
fail("This script couldn't determine which RC tag to promote, you'll need to fix up the RC tags and re-run the script.")
sys.exit(0)
# Prereq checks
apache_id = get_pref(prefs, 'apache_id', lambda: raw_input("Enter your apache username: "))
jdk7_java_home = get_pref(prefs, 'jdk7', lambda: raw_input("Enter the path for JAVA_HOME for a JDK7 compiler (blank to use default JAVA_HOME): "))
jdk7_env = dict(os.environ) if jdk7_java_home.strip() else None
if jdk7_env is not None: jdk7_env['JAVA_HOME'] = jdk7_java_home
if "1.7.0" not in cmd_output("java -version", env=jdk7_env):
fail("You must be able to build artifacts with JDK7 for Scala 2.10 and 2.11 artifacts")
jdk8_java_home = get_pref(prefs, 'jdk8', lambda: raw_input("Enter the path for JAVA_HOME for a JDK8 compiler (blank to use default JAVA_HOME): "))
jdk8_env = dict(os.environ) if jdk8_java_home.strip() else None
if jdk8_env is not None: jdk8_env['JAVA_HOME'] = jdk8_java_home
if "1.8.0" not in cmd_output("java -version", env=jdk8_env):
fail("You must be able to build artifacts with JDK8 for Scala 2.12 artifacts")
def select_gpg_key():
print("Here are the available GPG keys:")
available_keys = cmd_output("gpg --list-secret-keys")
print(available_keys)
key_name = raw_input("Which user name (enter the user name without email address): ")
if key_name not in available_keys:
fail("Couldn't find the requested key.")
return key_name
key_name = get_pref(prefs, 'gpg-key', select_gpg_key)
gpg_passphrase = get_pref(prefs, 'gpg-pass', lambda: getpass("Passphrase for this GPG key: "))
# Do a quick validation so we can fail fast if the password is incorrect
with tempfile.NamedTemporaryFile() as gpg_test_tempfile:
gpg_test_tempfile.write("abcdefg")
cmd("Testing GPG key & passphrase", ["gpg", "--batch", "--pinentry-mode", "loopback", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", gpg_test_tempfile.name + ".asc", "--detach-sig", gpg_test_tempfile.name], stdin=gpg_passphrase)
# Save preferences
print("Saving preferences to %s" % PREFS_FILE)
with open(PREFS_FILE, 'w') as prefs_fp:
prefs = json.dump(prefs, prefs_fp)
# Generate RC
try:
int(rc)
except ValueError:
fail("Invalid release candidate number: %s" % rc)
rc_tag = release_version + '-rc' + rc
delete_gitrefs = True # Since we are about to start creating new git refs, enable cleanup function on failure to try to delete them
cmd("Checking out current development branch", "git checkout -b %s %s" % (release_version, PUSH_REMOTE_NAME + "/" + dev_branch))
print("Updating version numbers")
replace("gradle.properties", "version", "version=%s" % release_version)
replace("tests/kafkatest/__init__.py", "__version__", "__version__ = '%s'" % release_version)
cmd("update streams quickstart pom", ["sed", "-i", ".orig"," s/-SNAPSHOT//", "streams/quickstart/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/src/main/resources/archetype-resources/pom.xml"])
cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig")
# Command in explicit list due to messages with spaces
cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
# Command in explicit list due to messages with spaces
cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag])
rc_githash = cmd_output("git show-ref --hash " + rc_tag)
cmd("Switching back to your starting branch", "git checkout %s" % starting_branch)
# Note that we don't use tempfile here because mkdtemp causes problems with sftp and being able to determine the absolute path to a file.
# Instead we rely on a fixed path and if it already exists we fail, asking the user to clean it up before retrying.
work_dir = os.path.join(REPO_HOME, ".release_work_dir")
if os.path.exists(work_dir):
fail("A previous attempt at a release left dirty state in the work directory. Clean up %s before proceeding. (This attempt will try to cleanup, simply retrying may be sufficient now...)" % work_dir)
os.makedirs(work_dir)
print("Temporary build working director:", work_dir)
kafka_dir = os.path.join(work_dir, 'kafka')
streams_quickstart_dir = os.path.join(kafka_dir, 'streams/quickstart')
print("Streams quickstart dir", streams_quickstart_dir)
cmd("Creating staging area for release artifacts", "mkdir kafka-" + rc_tag, cwd=work_dir)
artifacts_dir = os.path.join(work_dir, "kafka-" + rc_tag)
cmd("Cloning clean copy of repo", "git clone %s kafka" % REPO_HOME, cwd=work_dir)
cmd("Checking out RC tag", "git checkout -b %s %s" % (release_version, rc_tag), cwd=kafka_dir)
current_year = datetime.datetime.now().year
cmd("Verifying the correct year in NOTICE", "grep %s NOTICE" % current_year, cwd=kafka_dir)
with open(os.path.join(artifacts_dir, "RELEASE_NOTES.html"), 'w') as f:
print("Generating release notes")
try:
subprocess.check_call(["./release_notes.py", release_version], stdout=f)
except subprocess.CalledProcessError as e:
print_output(e.output)
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
params = { 'release_version': release_version,
'rc_tag': rc_tag,
'artifacts_dir': artifacts_dir
}
cmd("Creating source archive", "git archive --format tar.gz --prefix kafka-%(release_version)s-src/ -o %(artifacts_dir)s/kafka-%(release_version)s-src.tgz %(rc_tag)s" % params)
cmd("Building artifacts", "gradle", cwd=kafka_dir, env=jdk7_env)
cmd("Building artifacts", "./gradlew clean releaseTarGzAll aggregatedJavadoc", cwd=kafka_dir, env=jdk7_env)
# we need extra cmd to build 2.12 with jdk8 specifically
cmd("Building artifacts for Scala 2.12", "./gradlew releaseTarGz -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Copying artifacts", "cp %s/core/build/distributions/* %s" % (kafka_dir, artifacts_dir), shell=True)
cmd("Copying artifacts", "cp -R %s/build/docs/javadoc %s" % (kafka_dir, artifacts_dir))
for filename in os.listdir(artifacts_dir):
full_path = os.path.join(artifacts_dir, filename)
if not os.path.isfile(full_path):
continue
# Commands in explicit list due to key_name possibly containing spaces
cmd("Signing " + full_path, ["gpg", "--batch", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", full_path + ".asc", "--detach-sig", full_path], stdin=gpg_passphrase)
cmd("Verifying " + full_path, ["gpg", "--verify", full_path + ".asc", full_path])
# Note that for verification, we need to make sure only the filename is used with --print-md because the command line
# argument for the file is included in the output and verification uses a simple diff that will break if an absolute path
# is used.
dir, fname = os.path.split(full_path)
cmd("Generating MD5 for " + full_path, "gpg --print-md md5 %s > %s.md5" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA1 for " + full_path, "gpg --print-md sha1 %s > %s.sha1" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA512 for " + full_path, "gpg --print-md sha512 %s > %s.sha512" % (fname, fname), shell=True, cwd=dir)
cmd("Listing artifacts to be uploaded:", "ls -R %s" % artifacts_dir)
if not user_ok("Going to upload the artifacts in %s, listed above, to your Apache home directory. Ok (y/n)?): " % artifacts_dir):
fail("Quitting")
sftp_mkdir("public_html")
kafka_output_dir = "kafka-" + rc_tag
sftp_mkdir(os.path.join("public_html", kafka_output_dir))
public_release_dir = os.path.join("public_html", kafka_output_dir)
# The sftp -r option doesn't seem to work as would be expected, at least with the version shipping on OS X. To work around this we process all the files and directories manually...
sftp_cmds = ""
for root, dirs, files in os.walk(artifacts_dir):
assert root.startswith(artifacts_dir)
for file in files:
local_path = os.path.join(root, file)
remote_path = os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], file)
sftp_cmds += "\nput %s %s" % (local_path, remote_path)
for dir in dirs:
sftp_mkdir(os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], dir))
if sftp_cmds:
cmd("Uploading artifacts in %s to your Apache home directory" % root, "sftp -b - %[email protected]" % apache_id, stdin=sftp_cmds)
with open(os.path.expanduser("~/.gradle/gradle.properties")) as f:
contents = f.read()
if not user_ok("Going to build and upload mvn artifacts based on these settings:\n" + contents + '\nOK (y/n)?: '):
fail("Retry again later")
cmd("Building and uploading archives", "./gradlew uploadArchivesAll", cwd=kafka_dir, env=jdk7_env)
cmd("Building and uploading archives", "./gradlew uploadCoreArchives_2_12 -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Building and uploading archives", "mvn deploy -Pgpg-signing", cwd=streams_quickstart_dir, env=jdk7_env)
release_notification_props = { 'release_version': release_version,
'rc': rc,
'rc_tag': rc_tag,
'rc_githash': rc_githash,
'dev_branch': dev_branch,
'docs_version': docs_version,
'apache_id': apache_id,
}
# TODO: Many of these suggested validation steps could be automated and would help pre-validate a lot of the stuff voters test
print("""
*******************************************************************************************************************************************************
Ok. We've built and staged everything for the %(rc_tag)s.
Now you should sanity check it before proceeding. All subsequent steps start making RC data public.
Some suggested steps:
* Grab the source archive and make sure it compiles: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz
* Grab one of the binary distros and run the quickstarts against them: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s.tgz
* Extract and verify one of the site docs jars: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s-site-docs.tgz
* Build a sample against jars in the staging repo: (TODO: Can we get a temporary URL before "closing" the staged artifacts?)
* Validate GPG signatures on at least one file:
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.asc &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.md5 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha1 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha512 &&
gpg --verify kafka-%(release_version)s-src.tgz.asc kafka-%(release_version)s-src.tgz &&
gpg --print-md md5 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.md5 &&
gpg --print-md sha1 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha1 &&
gpg --print-md sha512 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha512 &&
rm kafka-%(release_version)s-src.tgz* &&
echo "OK" || echo "Failed"
* Validate the javadocs look ok. They are at http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
*******************************************************************************************************************************************************
""" % release_notification_props)
if not user_ok("Have you sufficiently verified the release artifacts (y/n)?: "):
fail("Ok, giving up")
print("Next, we need to get the Maven artifacts we published into the staging repository.")
# TODO: Can we get this closed via a REST API since we already need to collect credentials for this repo?
print("Go to https://repository.apache.org/#stagingRepositories and hit 'Close' for the new repository that was created by uploading artifacts.")
if not user_ok("Have you successfully deployed the artifacts (y/n)?: "):
fail("Ok, giving up")
if not user_ok("Ok to push RC tag %s (y/n)?: " % rc_tag):
fail("Ok, giving up")
cmd("Pushing RC tag", "git push %s %s" % (PUSH_REMOTE_NAME, rc_tag))
# Move back to starting branch and clean out the temporary release branch (e.g. 1.0.0) we used to generate everything
cmd("Resetting repository working state", "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
email_contents = """
To: [email protected], [email protected], [email protected]
Subject: [VOTE] %(release_version)s RC%(rc)s
Hello Kafka users, developers and client-developers,
This is the first candidate for release of Apache Kafka %(release_version)s.
<DESCRIPTION OF MAJOR CHANGES, INCLUDE INDICATION OF MAJOR/MINOR RELEASE>
Release notes for the %(release_version)s release:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/RELEASE_NOTES.html
*** Please download, test and vote by <VOTING DEADLINE, e.g. Monday, March 28, 9am PT>
Kafka's KEYS file containing PGP keys we use to sign the release:
http://kafka.apache.org/KEYS
* Release artifacts to be voted upon (source and binary):
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/
* Maven artifacts to be voted upon:
https://repository.apache.org/content/groups/staging/
* Javadoc:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
* Tag to be voted upon (off %(dev_branch)s branch) is the %(release_version)s tag:
https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=tag;h=%(rc_githash)s
* Documentation:
http://kafka.apache.org/%(docs_version)s/documentation.html
* Protocol:
http://kafka.apache.org/%(docs_version)s/protocol.html
* Successful Jenkins builds for the %(dev_branch)s branch:
Unit/integration tests: https://builds.apache.org/job/kafka-%(dev_branch)s-jdk7/<BUILD NUMBER>/
System tests: https://jenkins.confluent.io/job/system-test-kafka-%(dev_branch)s/<BUILD_NUMBER>/
/**************************************
Thanks,
<YOU>
""" % release_notification_props
print()
print()
print("*****************************************************************")
print()
print(email_contents)
print()
print("*****************************************************************")
print()
print("All artifacts should now be fully staged. Use the above template to send the announcement for the RC to the mailing list.")
print("IMPORTANT: Note that there are still some substitutions that need to be made in the template:")
print(" - Describe major changes in this release")
print(" - Deadline for voting, which should be at least 3 days after you send out the email")
print(" - Jenkins build numbers for successful unit & system test builds")
print(" - Fill in your name in the signature")
print(" - Finally, validate all the links before shipping!")
print("Note that all substitutions are annotated with <> around them.")
| apache-2.0 | -7,052,171,143,124,083,000 | 47.523346 | 275 | 0.670222 | false |
googleapis/python-dialogflow | google/cloud/dialogflow_v2/services/versions/transports/base.py | 1 | 8840 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.dialogflow_v2.types import version
from google.cloud.dialogflow_v2.types import version as gcd_version
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
_API_CORE_VERSION = google.api_core.__version__
class VersionsTransport(abc.ABC):
"""Abstract transport class for Versions."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): These two class methods are in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-api-core
# and google-auth are increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
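    # For illustration: with google-auth >= 1.25.0 and scopes=None this returns
    # {"scopes": None, "default_scopes": cls.AUTH_SCOPES}; on older versions it
    # returns {"scopes": cls.AUTH_SCOPES}.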
# TODO: Remove this function once google-api-core >= 1.26.0 is required
@classmethod
def _get_self_signed_jwt_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Union[Optional[Sequence[str]], str]]:
"""Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version"""
self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {}
if _API_CORE_VERSION and (
packaging.version.parse(_API_CORE_VERSION)
>= packaging.version.parse("1.26.0")
):
self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES
self_signed_jwt_kwargs["scopes"] = scopes
self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST
else:
self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES
return self_signed_jwt_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_versions: gapic_v1.method.wrap_method(
self.list_versions, default_timeout=None, client_info=client_info,
),
self.get_version: gapic_v1.method.wrap_method(
self.get_version, default_timeout=None, client_info=client_info,
),
self.create_version: gapic_v1.method.wrap_method(
self.create_version, default_timeout=None, client_info=client_info,
),
self.update_version: gapic_v1.method.wrap_method(
self.update_version, default_timeout=None, client_info=client_info,
),
self.delete_version: gapic_v1.method.wrap_method(
self.delete_version, default_timeout=None, client_info=client_info,
),
}
@property
def list_versions(
self,
) -> Callable[
[version.ListVersionsRequest],
Union[version.ListVersionsResponse, Awaitable[version.ListVersionsResponse]],
]:
raise NotImplementedError()
@property
def get_version(
self,
) -> Callable[
[version.GetVersionRequest], Union[version.Version, Awaitable[version.Version]]
]:
raise NotImplementedError()
@property
def create_version(
self,
) -> Callable[
[gcd_version.CreateVersionRequest],
Union[gcd_version.Version, Awaitable[gcd_version.Version]],
]:
raise NotImplementedError()
@property
def update_version(
self,
) -> Callable[
[gcd_version.UpdateVersionRequest],
Union[gcd_version.Version, Awaitable[gcd_version.Version]],
]:
raise NotImplementedError()
@property
def delete_version(
self,
) -> Callable[
[version.DeleteVersionRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
__all__ = ("VersionsTransport",)
| apache-2.0 | -1,845,300,222,030,163,700 | 36.457627 | 108 | 0.637557 | false |
JaDogg/__py_playground | reference/sketchbook/chess/chess.py | 1 | 8066 | """
Chess board
No computer player yet
Sucks in other ways too
TO DO: look over http://home.hccnet.nl/h.g.muller/max-src2.html
"""
## b = InitialChessBoard()
## print str(b)
#. rnbqkbnr
#. pppppppp
#.
#.
#.
#.
#. PPPPPPPP
#. RNBQKBNR
## pw = HumanPlayer(white)
## pb = HumanPlayer(black)
## b.outcome
## ' '.join(sorted(map(str, b.get_moves())))
#. 'a2-a3 a2-a4 b1-a3 b1-c3 b2-b3 b2-b4 c2-c3 c2-c4 d2-d3 d2-d4 e2-e3 e2-e4 f2-f3 f2-f4 g1-f3 g1-h3 g2-g3 g2-g4 h2-h3 h2-h4 resign'
## m = b.parse_move('resign')
## b1 = m.update(b)
## b1.outcome
#. 'black'
def main():
print "(Moves look like 'e2-e3')"
play_chess(HumanPlayer, HumanPlayer)
def play_chess(white_strategy, black_strategy):
return play(InitialChessBoard(), [white_strategy, black_strategy])
def play(board, strategies):
players = [strategy(side)
for strategy, side in zip(strategies, board.get_sides())]
while board.get_outcome() is None:
board = board.play_turn(players)
for player in players:
player.on_game_over(board)
class HumanPlayer:
def __init__(self, side):
self.side = side
def pick_move(self, board):
board.show()
while True:
string = raw_input('%s, your move? ' % self.side.capitalize())
try:
move = board.parse_move(string)
except MoveIllegal:
print 'Illegal move.'
else:
return move
def on_game_over(self, board):
board.show()
if board.get_outcome() is None:
pass
elif board.get_outcome() == self.side:
print '%s, you win!' % self.side.capitalize()
elif board.get_outcome() == 'draw':
print 'You draw.'
else:
print '%s, you lose!' % self.side.capitalize()
def InitialChessBoard():
squares = ['----------',
'-rnbqkbnr-',
'-pppppppp-',
'- -',
'- -',
'- -',
'- -',
'-PPPPPPPP-',
'-RNBQKBNR-',
'----------',]
return ChessBoard(white, squares, (False, False), None)
class MoveIllegal(Exception):
pass
class ChessBoard:
def __init__(self, mover, squares, castled, outcome):
self.mover = mover
self.squares = squares
self.castled = castled
self.outcome = outcome
def __str__(self):
return '\n'.join(line[1:-1] for line in self.squares[1:-1])
def has_castled(self, player):
return self.castled[player == black]
def get_outcome(self):
"Return None, 'draw', black, or white (meaning the winner)."
return self.outcome
def resign(self):
return ChessBoard(opponent(self.mover),
self.squares,
self.castled,
opponent(self.mover))
def move_piece(self, (r0, c0), (r1, c1)):
squares = list(map(list, self.squares))
piece = squares[r0][c0]
squares[r0][c0] = ' '
squares[r1][c1] = piece
return ChessBoard(opponent(self.mover),
list(map(''.join, squares)),
self.castled,
None) # XXX check for checkmate or draw
def show(self):
print self
def get_sides(self):
return (white, black)
def play_turn(self, (white_player, black_player)):
player = white_player if self.mover == white else black_player
move = player.pick_move(self)
if move in self.get_moves():
return move.update(self)
raise Exception("Bad move")
def parse_move(self, string):
for move in self.get_moves():
if move.matches(string):
return move
raise MoveIllegal()
def get_moves(self):
return [ResignMove()] + self.get_piece_moves()
def get_piece_moves(self):
return sum(map(self.moves_from, self.army(self.mover)), [])
def army(self, player):
for r, row in enumerate(self.squares):
for c, piece in enumerate(row):
if piece.isalpha() and piece.isupper() == (player == white):
yield r, c
def moves_from(self, pos):
return list(self.gen_moves_from(pos))
def gen_moves_from(self, (r, c)):
piece = self.squares[r][c]
piece, white = piece.upper(), piece.isupper()
def is_takeable(r1, c1):
return is_empty(r1, c1) or has_opponent(r1, c1)
def is_empty(r1, c1):
return self.squares[r1][c1] == ' '
def has_opponent(r1, c1):
there = self.squares[r1][c1]
return there.isalpha() and there.isupper() != white
def move_to(r1, c1):
return PieceMove((r, c), (r1, c1))
def move_freely(dirs):
for dr, dc in dirs:
for i in range(1, 9):
if is_empty(r+dr*i, c+dc*i):
yield move_to(r+dr*i, c+dc*i)
else:
if has_opponent(r+dr*i, c+dc*i):
yield move_to(r+dr*i, c+dc*i)
break
if piece in ' -':
pass
elif piece == 'P':
# TODO: pawn promotion
# TODO: en passant
forward = -1 if white else 1
if is_empty(r+forward, c):
yield move_to(r+forward, c)
if r == (7 if white else 2): # initial 2 steps
if is_empty(r+forward*2, c): yield move_to(r+forward*2, c)
if has_opponent(r+forward, c-1): yield move_to(r+forward, c-1)
if has_opponent(r+forward, c+1): yield move_to(r+forward, c+1)
elif piece == 'K':
# TODO castling
# TODO forbid moving into check
# (and this can apply to moves of other pieces)
for dr, dc in queen_dirs:
if is_takeable(r+dr, c+dc):
yield move_to(r+dr, c+dc)
elif piece == 'Q':
for move in move_freely(queen_dirs): yield move
elif piece == 'R':
for move in move_freely(rook_dirs): yield move
elif piece == 'B':
for move in move_freely(bishop_dirs): yield move
elif piece == 'N':
for dr, dc in knight_jumps:
if 1 <= r+dr <= 8 and 1 <= c+dc <= 8:
if is_takeable(r+dr, c+dc):
yield move_to(r+dr, c+dc)
else:
assert False
rook_dirs = [( 0, 1), ( 0,-1), ( 1, 0), (-1, 0)]
bishop_dirs = [(-1,-1), (-1, 1), ( 1,-1), ( 1, 1)]
queen_dirs = rook_dirs + bishop_dirs
knight_jumps = [( 2, 1), ( 2,-1), ( 1, 2), ( 1,-2),
(-2, 1), (-2,-1), (-1, 2), (-1,-2)]
white, black = 'white', 'black'
def opponent(side):
return black if side == white else white
class ResignMove:
def __eq__(self, other):
return isinstance(other, ResignMove)
def update(self, board):
return board.resign()
def matches(self, string):
return string.lower() == str(self)
def __str__(self):
return 'resign'
class PieceMove:
def __init__(self, from_pos, to_pos):
self.from_pos = from_pos
self.to_pos = to_pos
def __eq__(self, other):
return (isinstance(other, PieceMove)
and self.from_pos == other.from_pos
and self.to_pos == other.to_pos)
def update(self, board):
return board.move_piece(self.from_pos, self.to_pos)
def matches(self, string):
return string.lower() == str(self)
def __str__(self):
# XXX 'a' is top of board for Black?
fr, fc = self.from_pos
tr, tc = self.to_pos
return '%s%d-%s%d' % ('abcdefgh'[fc-1], 9-fr,
'abcdefgh'[tc-1], 9-tr)
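# Illustrative mapping (example positions assumed): from_pos=(7,5), to_pos=(5,5)
# yields 'e2-e4', since column 5 -> file 'e' and rows 7 and 5 -> ranks 2 and 4.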
if __name__ == '__main__':
main()
| mit | -2,925,017,823,287,011,300 | 30.263566 | 131 | 0.508926 | false |
vladiibine/whispy_lispy | src/whispy_lispy/cst.py | 1 | 4255 | # -*- coding: utf-8 -*-
"""
Concrete syntax tree stuff
Lexer should return tokens that are instances of classes found here
"""
from __future__ import unicode_literals
import six
from whispy_lispy import keywords
class CSTError(Exception):
pass
class Token(object):
"""Concrete syntax tree node.
Can represent a literal, operator, a name, or an atom.
An atom is an ordered list of the previously mentioned elements
"""
__slots__ = ['value', 'source', 'index']
def __init__(self, value, source=None, index=None):
"""
:param value: the value of the token (python type)
:param str source: the source code
:param int index: the index of the token in the source code
"""
self.value = value
self.source = source
self.index = index
def __repr__(self):
return '<T {}>'.format(self.value)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Token):
return False
return self.value == other.value
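# Illustrative values (assumed, not from the original module): Token(3, source='3', index=0)
# wraps a literal, while Token('+', source='(+ 1 2)', index=1) wraps an operator/name token.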
class ConcreteSyntaxNode(object):
"""A node in the concrete syntax tree.
The state of this node is kept as a tuple
"""
__slots__ = ['values']
def __init__(self, values):
"""
The tuple either contains other nodes, or values. Not both!
:type values: tuple
"""
types = set(type(elem) for elem in values)
if len(types) > 1:
raise CSTError(
"Concrete Syntax Node should contain either other nodes, or "
"simple values, not both. This node contains {} value(s): {}"
.format(len(types), values)
)
self.values = values
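# For illustration (values assumed): ConcreteSyntaxNode((1,)) is a leaf int node;
# ConcreteSyntaxNode((ConcreteSyntaxNode(('a',)),)) nests another node; mixing
# plain values and nodes in one tuple raises CSTError.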
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, self.__class__):
return False
return self.values == other.values
def __repr__(self):
return '<cN {}>'.format(self.values)
def is_operator(self):
return (
len(self.values) == 1 and
self.values[0] in keywords.OPERATORS
)
def is_root(self):
return isinstance(self, RootConcreteSyntaxnode)
def is_leaf(self):
return all(
not isinstance(elem, ConcreteSyntaxNode) for elem in self.values)
def is_symbol(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], six.string_types)
)
def is_int(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], int)
)
def is_float(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], float)
)
def is_bool(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], bool)
)
def is_string(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], six.string_types) and
self.values[0][0] == '"' and
self.values[0][-1] == '"'
)
def symbol_equals(self, param):
if not self.is_symbol():
raise CSTError('Node is not a symbol')
return self.values[0] == param
def symbol_in_iterable(self, iterable):
for elem in iterable:
if self.symbol_equals(elem):
return True
return False
class RootConcreteSyntaxnode(ConcreteSyntaxNode):
def __repr__(self):
return '<RcN {}>'.format(self.values)
class NestingCommand(Token):
"""Represents a command to either increment or decrement the tree level
"""
def __repr__(self):
return '{}'.format(self.value[0])
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.value == other.value
class IncrementNesting(NestingCommand):
def __init__(self, _=None, source=None, index=None):
super(IncrementNesting, self).__init__(['<INC>'], source, index)
class DecrementNesting(NestingCommand):
def __init__(self, _=None, source=None, index=None):
super(DecrementNesting, self).__init__(['<DEC>'], source, index)
| mit | -7,624,388,774,132,405,000 | 25.761006 | 77 | 0.564512 | false |
mcStargazer/nlp_talk_apr2017 | nlp_demo.py | 1 | 5920 | # -*- coding: utf-8 -*-
##############################################################################
# references
##############################################################################
# www.udemy.com/machinelearning/ - I really enjoyed this course. Take it!
# original data/code at www.superdatascience.com/machine-learning/
# en.wikipedia.org/wiki/Natural_language_processing
##############################################################################
# import the libraries
##############################################################################
# look to the future if running on Python 2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# importing the standard libraries
import os
import sys
# importing 3rd party libraries
#import nltk # run this import and next line if stopwords
#nltk.download('stopwords') # are not already downloaded to your computer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import numpy as np
import pandas as pd
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split as split
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.naive_bayes import GaussianNB as GNB
from sklearn.ensemble import RandomForestClassifier as RFC
# importing local
sys.path.append(os.path.abspath('.'))
##############################################################################
# prepare the data: read and clean
##############################################################################
# read the datasets
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
common_words = set(stopwords.words('english')) # sets are faster
# clean the text
corpus = [] # a list to hold the results
ps = PorterStemmer() # lower sparsity by stemming
for i in range(0, len(dataset['Review'])):
#i=0; i=1; i=2
review = dataset['Review'][i] # get the i'th review
review = re.sub('[^a-zA-Z]', ' ', review) # spacify non-letters
review = review.lower() # make all lowercase
review = review.split() # create iteratable
review = [ps.stem(word) for word in review # stem the words
if not word in common_words] # exclude stop words
corpus.append( ' '.join(review) )
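# Illustrative effect (example review assumed): "Wow... Loved this place."
# becomes "wow love place" after the cleaning, stop-word removal and stemming.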
##############################################################################
# fit and assess the model
##############################################################################
# set variables for the run
features = 1000 # number of words to keep in the model
method = "GNB" # methods include GNB, DTC, or RFC
folds = 30 # number of cross-folds to perform
verbose = 0 # if non-zero, prints metrics for each fold
# begin reporting
print("\nUsing {} Classifier: {} features, {} folds".format(method,
features,
folds))
header = "{:>8s},{:>9s},{:>10s},{:>13s},{:>8s}"
rows = "{:8d},{:>9.3f},{:>10.3f},{:>13.3f},{:>8.3f}"
if verbose:
print(header.format("n-fold","accuracy","precision","completeness","f1"))
# use the bag-of-words model to create X and y
cv = CountVectorizer(max_features = features)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
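# For reference (shapes assumed from the standard 1000-review file): X is an
# (n_reviews, <=1000) matrix of token counts, y the 0/1 'Liked' labels.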
# run across multiple folds
m = {'a':[], 'p':[], 'c':[], 'f1':[]} # dict to hold n-fold metrics
for n in range(folds):
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = split(X, y, test_size=0.20)
# Use any appropriate classifier.
# Commonly: Naive Bayes, Decision Trees, and Random Forests.
# Also: CART, C5.0, Maximum Entropy
if method == "GNB":
classifier = GNB()
if method == "DTC":
classifier = DTC(criterion='entropy', random_state=0)
if method == "RFC":
classifier = RFC(n_estimators=10, criterion='entropy', random_state=0)
# fit the machine learning algorithm and predict the test set results
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# making the confusion matrix and derived metrics, and storing them
cm = confusion_matrix(y_test, y_pred)
a = (cm[0,0] + cm[1,1])/np.sum(cm) # accuracy = (TP+TN)/(TP+TN+FP+FN)
p = cm[0,0]/(cm[0,0] + cm[1,0]) # precision = TP/(TP+FP)
c = cm[0,0]/(cm[0,0] + cm[0,1]) # completeness = TP/(TP+FN)
    f1 = 2*p*c/(p + c)                 # harmonic mean of precision and completeness
m['a'].append(a)
m['p'].append(p)
m['c'].append(c)
m['f1'].append(f1)
# report metrics for each fold
if verbose:
print(rows.format(n+1, a, p, c, f1))
# report summary of metrics
print("\n accuracy, precision, completeness, f1")
print(" minima", rows[6:].format(min(m['a']), min(m['p']),
min(m['c']), min(m['f1'])))
print(" mean", rows[6:].format(np.mean(m['a']), np.mean(m['p']),
np.mean(m['c']), np.mean(m['f1'])))
print(" maxima", rows[6:].format(max(m['a']), max(m['p']),
max(m['c']), max(m['f1'])))
##############################################################################
# where I am going from here...
##############################################################################
# continue exploring the parameter space balancing fit with appropriateness
# study word2vec and globe data models, other stemming algorithms
# www.udemy.com/natural-language-processing-with-deep-learning-in-python/
# www.udemy.com/data-science-natural-language-processing-in-python/
| mit | -1,762,223,717,350,091,000 | 39.827586 | 78 | 0.538176 | false |
apache/airflow | airflow/sensors/time_delta_sensor.py | 2 | 1073 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.sensors.time_delta`."""
import warnings
from airflow.sensors.time_delta import TimeDeltaSensor # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.sensors.time_delta`.", DeprecationWarning, stacklevel=2
)
| apache-2.0 | 597,100,851,005,736,700 | 40.269231 | 107 | 0.768872 | false |
sudheerchintala/LearnEraPlatForm | common/test/acceptance/tests/test_discussion.py | 1 | 25400 | """
Tests for discussion pages
"""
import datetime
from pytz import UTC
from uuid import uuid4
from nose.plugins.attrib import attr
from .helpers import UniqueCourseTest
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from ..fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
)
class DiscussionResponsePaginationTestMixin(object):
"""
A mixin containing tests for response pagination for use by both inline
discussion and the discussion tab
"""
def setup_thread(self, num_responses, **thread_kwargs):
"""
Create a test thread with the given number of responses, passing all
keyword arguments through to the Thread fixture, then invoke
setup_thread_page.
"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, **thread_kwargs)
)
for i in range(num_responses):
thread_fixture.addResponse(Response(id=str(i), body=str(i)))
thread_fixture.push()
self.setup_thread_page(thread_id)
def assert_response_display_correct(self, response_total, displayed_responses):
"""
Assert that various aspects of the display of responses are all correct:
* Text indicating total number of responses
* Presence of "Add a response" button
* Number of responses actually displayed
* Presence and text of indicator of how many responses are shown
* Presence and text of button to load more responses
"""
self.assertEqual(
self.thread_page.get_response_total_text(),
str(response_total) + " responses"
)
self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
self.assertEqual(
self.thread_page.get_shown_responses_text(),
(
None if response_total == 0 else
"Showing all responses" if response_total == displayed_responses else
"Showing first {} responses".format(displayed_responses)
)
)
self.assertEqual(
self.thread_page.get_load_responses_button_text(),
(
None if response_total == displayed_responses else
"Load all responses" if response_total - displayed_responses < 100 else
"Load next 100 responses"
)
)
def test_pagination_no_responses(self):
self.setup_thread(0)
self.assert_response_display_correct(0, 0)
def test_pagination_few_responses(self):
self.setup_thread(5)
self.assert_response_display_correct(5, 5)
def test_pagination_two_response_pages(self):
self.setup_thread(50)
self.assert_response_display_correct(50, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(50, 50)
def test_pagination_exactly_two_response_pages(self):
self.setup_thread(125)
self.assert_response_display_correct(125, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(125, 125)
def test_pagination_three_response_pages(self):
self.setup_thread(150)
self.assert_response_display_correct(150, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 125)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 150)
def test_add_response_button(self):
self.setup_thread(5)
self.assertTrue(self.thread_page.has_add_response_button())
self.thread_page.click_add_response_button()
def test_add_response_button_closed_thread(self):
self.setup_thread(5, closed=True)
self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_1')
class DiscussionTabSingleThreadTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for the discussion page displaying a single thread
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
# Create a course to register for
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def setup_thread_page(self, thread_id):
self.thread_page = DiscussionTabSingleThreadPage(self.browser, self.course_id, thread_id) # pylint:disable=W0201
self.thread_page.visit()
def test_marked_answer_comments(self):
thread_id = "test_thread_{}".format(uuid4().hex)
response_id = "test_response_{}".format(uuid4().hex)
comment_id = "test_comment_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, thread_type="question")
)
thread_fixture.addResponse(
Response(id=response_id, endorsed=True),
[Comment(id=comment_id)]
)
thread_fixture.push()
self.setup_thread_page(thread_id)
self.assertFalse(self.thread_page.is_comment_visible(comment_id))
self.assertFalse(self.thread_page.is_add_comment_visible(response_id))
self.assertTrue(self.thread_page.is_show_comments_visible(response_id))
self.thread_page.show_comments(response_id)
self.assertTrue(self.thread_page.is_comment_visible(comment_id))
self.assertTrue(self.thread_page.is_add_comment_visible(response_id))
self.assertFalse(self.thread_page.is_show_comments_visible(response_id))
@attr('shard_1')
class DiscussionCommentDeletionTest(UniqueCourseTest):
"""
Tests for deleting comments displayed beneath responses in the single thread view.
"""
def setUp(self):
super(DiscussionCommentDeletionTest, self).setUp()
# Create a course to register for
CourseFixture(**self.course_info).install()
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread"))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def test_comment_deletion_as_student(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
def test_comment_deletion_as_moderator(self):
self.setup_user(roles=['Moderator'])
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
page.delete_comment("comment_other_author")
@attr('shard_1')
class DiscussionCommentEditTest(UniqueCourseTest):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setUp(self):
super(DiscussionCommentEditTest, self).setUp()
# Create a course to register for
CourseFixture(**self.course_info).install()
def setup_user(self, roles=[]):
roles_str = ','.join(roles)
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread"))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('shard_1')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fix = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
)
)
)
)
).install()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.courseware_page.visit()
self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id) # pylint:disable=W0201
self.thread_page.expand()
def test_initial_render(self):
self.assertFalse(self.discussion_page.is_discussion_expanded())
def test_expand_discussion_empty(self):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
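        """Check that an anonymous-to-peers thread appears anonymous to peers but not to staff."""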
thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
self.courseware_page.visit()
self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
now = datetime.datetime.now(UTC)
self.course_fix.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fix._add_advanced_settings()
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
self.assertFalse(self.thread_page.has_add_response_button())
self.assertFalse(self.thread_page.is_response_editable("response1"))
self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
self.assertFalse(self.thread_page.is_comment_editable("comment1"))
self.assertFalse(self.thread_page.is_comment_editable("comment2"))
self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
@attr('shard_1')
class DiscussionUserProfileTest(UniqueCourseTest):
"""
Tests for user profile page in discussion tab.
"""
PAGE_SIZE = 20 # django_comment_client.forum.views.THREADS_PER_PAGE
PROFILED_USERNAME = "profiled-user"
def setUp(self):
super(DiscussionUserProfileTest, self).setUp()
CourseFixture(**self.course_info).install()
# The following line creates a user enrolled in our course, whose
# threads will be viewed, but not the one who will view the page.
# It isn't necessary to log them in, but using the AutoAuthPage
# saves a lot of code.
self.profiled_user_id = AutoAuthPage(
self.browser,
username=self.PROFILED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# now create a second user who will view the profile.
self.user_id = AutoAuthPage(
self.browser,
course_id=self.course_id
).visit().get_user_id()
def check_pages(self, num_threads):
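        """Exercise the profile-page pagination controls for the given number of threads."""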
# set up the stub server to return the desired amount of thread results
threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
UserProfileViewFixture(threads).push()
# navigate to default view (page 1)
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
current_page = 1
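        # integer division (Python 2) yields ceil(num_threads / PAGE_SIZE), with a minimum of one page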
total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
all_pages = range(1, total_pages + 1)
def _check_page():
# ensure the page being displayed as "current" is the expected one
self.assertEqual(page.get_current_page(), current_page)
# ensure the expected threads are being shown in the right order
threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
# ensure the clickable page numbers are the expected ones
self.assertEqual(page.get_clickable_pages(), [
p for p in all_pages
if p != current_page
and p - 2 <= current_page <= p + 2
or (current_page > 2 and p == 1)
or (current_page < total_pages and p == total_pages)
])
# ensure the previous button is shown, but only if it should be.
# when it is shown, make sure it works.
if current_page > 1:
self.assertTrue(page.is_prev_button_shown(current_page - 1))
page.click_prev_page()
self.assertEqual(page.get_current_page(), current_page - 1)
page.click_next_page()
self.assertEqual(page.get_current_page(), current_page)
else:
self.assertFalse(page.is_prev_button_shown())
# ensure the next button is shown, but only if it should be.
if current_page < total_pages:
self.assertTrue(page.is_next_button_shown(current_page + 1))
else:
self.assertFalse(page.is_next_button_shown())
# click all the way up through each page
for i in range(current_page, total_pages):
_check_page()
if current_page < total_pages:
page.click_on_page(current_page + 1)
current_page += 1
# click all the way back down
for i in range(current_page, 0, -1):
_check_page()
if current_page > 1:
page.click_on_page(current_page - 1)
current_page -= 1
def test_0_threads(self):
self.check_pages(0)
def test_1_thread(self):
self.check_pages(1)
def test_20_threads(self):
self.check_pages(20)
def test_21_threads(self):
self.check_pages(21)
def test_151_threads(self):
self.check_pages(151)
@attr('shard_1')
class DiscussionSearchAlertTest(UniqueCourseTest):
"""
Tests for spawning and dismissing alerts related to user search actions and their results.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionSearchAlertTest, self).setUp()
CourseFixture(**self.course_info).install()
# first auto auth call sets up a user that we will search for in some tests
self.searched_user_id = AutoAuthPage(
self.browser,
username=self.SEARCHED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# this auto auth call creates the actual session user
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def setup_corrected_text(self, text):
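        """Push a stub search result whose corrected_text is the given value."""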
SearchResultFixture(SearchResult(corrected_text=text)).push()
def check_search_alert_messages(self, expected):
actual = self.page.get_search_alert_messages()
self.assertTrue(all(map(lambda msg, sub: msg.lower().find(sub.lower()) >= 0, actual, expected)))
def test_no_rewrite(self):
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_dismiss(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.page.dismiss_alert_message("foo")
self.check_search_alert_messages([])
def test_new_search(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.setup_corrected_text("bar")
self.page.perform_search()
self.check_search_alert_messages(["bar"])
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_and_user(self):
self.setup_corrected_text("foo")
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])
def test_user_only(self):
self.setup_corrected_text(None)
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
# make sure clicking the link leads to the user profile page
UserProfileViewFixture([]).push()
self.page.get_search_alert_links().first.click()
DiscussionUserProfilePage(
self.browser,
self.course_id,
self.searched_user_id,
self.SEARCHED_USERNAME
).wait_for_page()
@attr('shard_1')
class DiscussionSortPreferenceTest(UniqueCourseTest):
"""
    Tests for the discussion thread sort preference setting.
"""
def setUp(self):
super(DiscussionSortPreferenceTest, self).setUp()
# Create a course to register for.
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
self.sort_page.visit()
def test_default_sort_preference(self):
"""
        Test the user's default sorting preference (default = "date").
"""
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, "date")
def test_change_sort_preference(self):
"""
        Test that the user's sorting preference changes properly.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
def test_last_preference_saved(self):
"""
        Test that the user's last sorting preference is saved.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
self.sort_page.refresh_page()
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
| agpl-3.0 | -3,555,889,301,510,238,700 | 40.100324 | 123 | 0.641693 | false |
ciaracdb/ACMusic | acmusic/settings.py | 1 | 4739 | """
Django settings for gettingstarted project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: change this before deploying to production!
SECRET_KEY = 'i+acxn5(akgsn!sr4^ghjqgf(^m&*@+g1@u^46gt@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'webpack_loader',
'dbapi.apps.DbapiConfig',
'mainsite.apps.MainsiteConfig'
)
MIDDLEWARE_CLASSES = (
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'acmusic.urls'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'PAGE_SIZE': 10
}
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/', # must end with slash
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': None,
'IGNORE': ['.+\.hot-update.js', '.+\.map']
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'acmusic.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
DATABASES['default']['TEST'] = {'NAME': DATABASES['default']['NAME']}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
os.path.join(BASE_DIR, 'reactapp')
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| apache-2.0 | 228,340,471,517,918,800 | 28.993671 | 91 | 0.685588 | false |
ContinuumIO/ashiba | enaml/enaml/qt/qt_dock_area.py | 1 | 8630 | #------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import os
from atom.api import Typed
from enaml.widgets.dock_area import ProxyDockArea
from enaml.widgets.dock_events import DockItemEvent
from .QtCore import QObject, QEvent, QSize, QTimer
from .QtGui import QTabWidget
from .docking.dock_manager import DockManager
from .docking.event_types import (
DockItemDocked, DockItemUndocked, DockItemExtended, DockItemRetracted,
DockItemShown, DockItemHidden, DockItemClosed, DockTabSelected
)
from .docking.q_dock_area import QDockArea
from .docking.style_sheets import get_style_sheet
from .qt_constraints_widget import QtConstraintsWidget
from .qt_dock_item import QtDockItem
TAB_POSITIONS = {
'top': QTabWidget.North,
'bottom': QTabWidget.South,
'left': QTabWidget.West,
'right': QTabWidget.East,
}
EVENT_TYPES = {
DockItemDocked: DockItemEvent.Docked,
DockItemUndocked: DockItemEvent.Undocked,
DockItemExtended: DockItemEvent.Extended,
DockItemRetracted: DockItemEvent.Retracted,
DockItemShown: DockItemEvent.Shown,
DockItemHidden: DockItemEvent.Hidden,
DockItemClosed: DockItemEvent.Closed,
DockTabSelected: DockItemEvent.TabSelected,
}
class DockLayoutFilter(QObject):
""" An event filter used by the QtDockArea.
This event filter listens for LayoutRequest events on the dock
area widget, and will send a size_hint_updated notification to
the constraints system when the dock area size hint changes. The
notifications are collapsed on a single shot timer so that the
dock area geometry can fully settle before being snapped by the
constraints layout engine.
"""
def __init__(self, owner):
super(DockLayoutFilter, self).__init__()
self._owner = owner
self._size_hint = QSize()
self._pending = False
self._timer = timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self.onNotify)
def onNotify(self):
self._owner.size_hint_updated()
self._pending = False
def eventFilter(self, obj, event):
if not self._pending and event.type() == QEvent.LayoutRequest:
hint = obj.sizeHint()
if hint != self._size_hint:
self._size_hint = hint
self._timer.start(0)
self._pending = True
return False
class DockEventFilter(QObject):
""" An event filter used by the QtDockArea.
This event filter listens for dock events on the dock area widget,
converts them to front-end events, and posts them to the front-end
declaration object.
"""
def __init__(self, owner):
super(DockEventFilter, self).__init__()
self._owner = owner
def eventFilter(self, obj, event):
e_type = EVENT_TYPES.get(event.type())
if e_type is not None:
d = self._owner.declaration
if d is not None:
d.dock_event(DockItemEvent(type=e_type, name=event.name()))
return False
class QtDockArea(QtConstraintsWidget, ProxyDockArea):
""" A Qt implementation of an Enaml DockArea.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QDockArea)
#: The docking manager which will drive the dock area.
manager = Typed(DockManager)
#: The event filter which listens for layout requests.
dock_layout_filter = Typed(DockLayoutFilter)
#: The event filter which listens for dock events.
dock_event_filter = Typed(DockEventFilter)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying QDockArea widget.
"""
self.widget = QDockArea(self.parent_widget())
self.manager = DockManager(self.widget)
self.dock_event_filter = DockEventFilter(self)
self.dock_layout_filter = DockLayoutFilter(self)
def init_widget(self):
""" Initialize the underlying widget.
"""
super(QtDockArea, self).init_widget()
d = self.declaration
self.set_tab_position(d.tab_position)
self.set_live_drag(d.live_drag)
if d.style:
self.set_style(d.style)
self.set_dock_events_enabled(d.dock_events_enabled)
def init_layout(self):
""" Initialize the layout of the underlying control.
"""
super(QtDockArea, self).init_layout()
manager = self.manager
for item in self.dock_items():
manager.add_item(item)
d = self.declaration
self.apply_layout(d.layout)
self.widget.installEventFilter(self.dock_layout_filter)
def destroy(self):
""" A reimplemented destructor.
This removes the event filter from the dock area and releases
the items from the dock manager.
"""
self.widget.removeEventFilter(self.dock_layout_filter)
self.widget.removeEventFilter(self.dock_event_filter)
del self.dock_layout_filter
del self.dock_event_filter
self.manager.destroy()
super(QtDockArea, self).destroy()
#--------------------------------------------------------------------------
# Utility Methods
#--------------------------------------------------------------------------
def dock_items(self):
""" Get an iterable of QDockItem children for this area.
"""
for d in self.declaration.dock_items():
w = d.proxy.widget
if w is not None:
yield w
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def child_added(self, child):
""" Handle the child added event for a QtDockArea.
"""
super(QtDockArea, self).child_added(child)
if isinstance(child, QtDockItem):
w = child.widget
if w is not None:
self.manager.add_item(w)
def child_removed(self, child):
""" Handle the child removed event for a QtDockArea.
"""
super(QtDockArea, self).child_removed(child)
if isinstance(child, QtDockItem):
w = child.widget
if w is not None:
self.manager.remove_item(w)
#--------------------------------------------------------------------------
# ProxyDockArea API
#--------------------------------------------------------------------------
def set_tab_position(self, position):
""" Set the default tab position on the underyling widget.
"""
self.widget.setTabPosition(TAB_POSITIONS[position])
def set_live_drag(self, live_drag):
""" Set the live drag state for the underlying widget.
"""
self.widget.setOpaqueItemResize(live_drag)
def set_style(self, style):
""" Set the style for the underlying widget.
"""
self.widget.setStyleSheet(get_style_sheet(style))
def set_dock_events_enabled(self, enabled):
""" Set whether or not dock events are enabled for the area.
"""
widget = self.widget
widget.setDockEventsEnabled(enabled)
if enabled:
widget.installEventFilter(self.dock_event_filter)
else:
widget.removeEventFilter(self.dock_event_filter)
def save_layout(self):
""" Save the current layout on the underlying widget.
"""
layout = self.manager.save_layout()
if os.environ.get('ENAML_DEPRECATED_DOCK_LAYOUT'):
from enaml.layout.dock_layout import convert_to_old_docklayout
layout = convert_to_old_docklayout(layout)
return layout
def apply_layout(self, layout):
""" Apply a new layout to the underlying widget.
"""
if os.environ.get('ENAML_DEPRECATED_DOCK_LAYOUT'):
from enaml.layout.dock_layout import convert_to_new_docklayout
layout = convert_to_new_docklayout(layout)
self.manager.apply_layout(layout)
def update_layout(self, ops):
""" Update the layout from a list of layout operations.
"""
self.manager.update_layout(ops)
| bsd-3-clause | 3,861,476,787,407,625,700 | 32.065134 | 79 | 0.585747 | false |
dstroppa/openstack-smartos-nova-grizzly | nova/virt/driver.py | 1 | 37731 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
import sys
from oslo.config import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import event as virtevent
driver_opts = [
cfg.StrOpt('compute_driver',
help='Driver to use for controlling virtualization. Options '
'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
'fake.FakeDriver, baremetal.BareMetalDriver, '
'vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver, '
'smartosapi.SmartOSDriver'),
cfg.StrOpt('default_ephemeral_format',
default=None,
help='The default format an ephemeral_volume will be '
'formatted with on creation.'),
cfg.StrOpt('preallocate_images',
default='none',
help='VM image preallocation mode: '
'"none" => no storage provisioning is done up front, '
'"space" => storage is fully allocated at instance start'),
cfg.BoolOpt('use_cow_images',
default=True,
help='Whether to use cow images'),
]
CONF = cfg.CONF
CONF.register_opts(driver_opts)
LOG = logging.getLogger(__name__)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
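    """Build a dict mapping driver type names to driver instances, parsing
    each entry of named_driver_config as "<type>=<importable driver class>"
    and instantiating the class with the given args and kwargs.
    """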
driver_registry = dict()
for driver_str in named_driver_config:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
driver_registry[driver_type] = driver_class(*args, **kwargs)
return driver_registry
def block_device_info_get_root(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('root_device_name')
def block_device_info_get_swap(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('swap') or {'device_name': None,
'swap_size': 0}
def swap_is_usable(swap):
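    """Return True if the swap info names a device and has a positive size."""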
return swap and swap['device_name'] and swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
return ephemerals
def block_device_info_get_mapping(block_device_info):
block_device_info = block_device_info or {}
block_device_mapping = block_device_info.get('block_device_mapping') or []
return block_device_mapping
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
}
def __init__(self, virtapi):
self.virtapi = virtapi
self._compute_event_callback = None
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
        including catching up with currently running VMs on the given host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_info(self, instance):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance_id):
"""Checks existence of an instance on the host.
:param instance_id: The ID / name of the instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return instance_id in self.list_instances()
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instance_uuids(self):
"""
Return the UUIDS of all the instances known to the virtualization
layer, as a list.
"""
raise NotImplementedError()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
"""
raise NotImplementedError()
def get_console_pool_info(self, console_type):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_output(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_vnc_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_spice_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
raise NotImplementedError()
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host"""
raise NotImplementedError()
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
def attach_interface(self, instance, image_meta, network_info):
"""Attach an interface to the instance."""
raise NotImplementedError()
def detach_interface(self, instance, network_info):
"""Detach an interface from the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""
Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
"""
Snapshots the specified instance.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
"""Completes a resize, turning on the migrated instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
"""Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
"""suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()
def power_on(self, instance):
"""Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
"""Soft delete the specified instance."""
raise NotImplementedError()
def restore(self, instance):
"""Restore the specified instance."""
raise NotImplementedError()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task
:param nodename:
node which the caller want to get resources from
a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
def pre_live_migration(self, ctxt, instance_ref,
block_device_info, network_info,
migrate_data=None):
"""Prepare an instance for live migration
:param ctxt: security context
:param instance_ref: instance object that will be migrated
:param block_device_info: instance block device information
:param network_info: instance network information
:param migrate_data: implementation specific data dict.
"""
raise NotImplementedError()
def pre_block_migration(self, ctxt, instance_ref, disk_info):
"""Prepare a block device for migration
:param ctxt: security context
:param instance_ref: instance object that will have its disk migrated
:param disk_info: information about disk to be migrated (as returned
from get_instance_disk_info())
"""
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host.
        :param ctxt: security context
        :param instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param dest: destination host
        :param post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :param recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :param block_migration: if true, migrate VM disk.
        :param migrate_data: implementation specific params.
"""
raise NotImplementedError()
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref: instance object that is migrated
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
raise NotImplementedError()
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param src_compute_info: Info about the sending machine
:param dst_compute_info: Info about the receiving machine
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
"""
Set the root password on the specified instance.
        The instance to update is identified by the instance_id parameter;
        new_pass is the value of the new password.
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""
Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
"""
Applies a diff to the instance metadata.
This is an optional driver method which is used to publish
changes to the instance's metadata to the hypervisor. If the
hypervisor has no means of publishing the instance metadata to
the instance, then this method should not be implemented.
"""
pass
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances
:param timeout: the currently configured timeout for considering
rebooting instances to be stuck
:param instances: instances that have been in rebooting state
longer than the configured timeout
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
raise NotImplementedError()
def get_host_stats(self, refresh=False):
"""Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
"""
Return performance counters associated with the given disk_id on the
given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
        Note that this function takes an instance name, not an instance ID.
"""
raise NotImplementedError()
def interface_stats(self, instance_name, iface_id):
"""
Return performance counters associated with the given iface_id on the
        given instance_name. These are returned as [rx_bytes, rx_packets,
rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
indicates receive, tx indicates transmit, bytes and packets indicate
the total number of bytes or packets transferred, and errs and dropped
is the total number of packets failed / dropped.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
        Note that this function takes an instance name, not an instance ID.
"""
raise NotImplementedError()
def legacy_nwinfo(self):
"""True if the driver requires the legacy network_info format."""
# TODO(tr3buchet): update all subclasses and remove this method and
# related helpers.
raise NotImplementedError(self.legacy_nwinfo)
def macs_for_instance(self, instance):
"""What MAC addresses must this instance have?
Some hypervisors (such as bare metal) cannot do freeform virtualisation
of MAC addresses. This method allows drivers to return a set of MAC
addresses that the instance is to have. allocate_for_instance will take
this into consideration when provisioning networking for the instance.
Mapping of MAC addresses to actual networks (or permitting them to be
freeform) is up to the network implementation layer. For instance,
with openflow switches, fixed MAC addresses can still be virtualised
onto any L2 domain, with arbitrary VLANs etc, but regular switches
require pre-configured MAC->network mappings that will match the
actual configuration.
Most hypervisors can use the default implementation which returns None.
Hypervisors with MAC limits should return a set of MAC addresses, which
will be supplied to the allocate_for_instance call by the compute
manager, and it is up to that call to ensure that all assigned network
details are compatible with the set of MAC addresses.
This is called during spawn_instance by the compute manager.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
return None
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
Some drivers chose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the ip of the
machine that will be making the connection, the name of the iscsi
initiator and the hostname of the machine as follows::
{
'ip': ip,
'initiator': initiator,
'host': hostname
}
"""
raise NotImplementedError()
def get_available_nodes(self):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
stats = self.get_host_stats(refresh=True)
if not isinstance(stats, list):
stats = [stats]
return [s['hypervisor_hostname'] for s in stats]
def get_per_instance_usage(self):
"""Get information about instance resource usage.
:returns: dict of nova uuid => dict of usage info
"""
return {}
def instance_on_disk(self, instance):
"""Checks access of instance files on the host.
:param instance: instance to lookup
Returns True if files of an instance with the supplied ID accessible on
the host, False otherwise.
.. note::
Used in rebuild for HA implementation and required for validation
of access to instance shared disk files
"""
return False
def register_event_listener(self, callback):
"""Register a callback to receive events.
Register a callback to receive asynchronous event
notifications from hypervisors. The callback will
be invoked with a single parameter, which will be
an instance of the nova.virt.event.Event class."""
self._compute_event_callback = callback
def emit_event(self, event):
"""Dispatches an event to the compute manager.
Invokes the event callback registered by the
compute manager to dispatch the event. This
must only be invoked from a green thread."""
if not self._compute_event_callback:
LOG.debug("Discarding event %s" % str(event))
return
if not isinstance(event, virtevent.Event):
raise ValueError(
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug("Emitting event %s" % str(event))
self._compute_event_callback(event)
except Exception, ex:
LOG.error(_("Exception dispatching event %(event)s: %(ex)s")
% locals())
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
Load the compute driver module specified by the compute_driver
configuration option or, if supplied, the driver name supplied as an
argument.
Compute drivers constructors take a VirtAPI object as their first object
and this must be supplied.
:param virtapi: a VirtAPI instance
:param compute_driver: a compute driver name to override the config opt
:returns: a ComputeDriver instance
"""
if not compute_driver:
compute_driver = CONF.compute_driver
if not compute_driver:
LOG.error(_("Compute driver option required, but not specified"))
sys.exit(1)
LOG.info(_("Loading compute driver '%s'") % compute_driver)
try:
driver = importutils.import_object_ns('nova.virt',
compute_driver,
virtapi)
return utils.check_isinstance(driver, ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
def compute_driver_matches(match):
return CONF.compute_driver.endswith(match)
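# --- Illustrative sketch (editorial addition, not part of the original module) ---
# A minimal example of how a service might obtain a driver instance through
# load_compute_driver(); "my_virtapi" and the fake driver name mentioned in the
# comment are assumptions used purely for demonstration.
def _example_load_driver(my_virtapi):
    # Load whatever driver CONF.compute_driver points at (e.g. "fake.FakeDriver")
    # and ask it which nodes it manages.
    driver = load_compute_driver(my_virtapi)
    return driver.get_available_nodes()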
| apache-2.0 | 6,519,940,940,505,130,000 | 39.310897 | 79 | 0.64496 | false |
aneumeier/userprofile | userprofile/tests.py | 1 | 1713 | from django.test import TestCase
from .models import Profile
from datetime import date
class ViewsTest(TestCase):
"""
TestCase to test all exposed views for anonymous users.
"""
def setUp(self):
pass
def testHome(self):
response = self.client.get('/user/')
self.assertEquals(response.status_code, 200)
def testLogin(self):
response = self.client.get('/user/login/')
self.assertEquals(response.status_code, 200)
def testLogout(self):
response = self.client.get('/user/logout/')
self.assertEquals(response.status_code, 302)
self.assertRedirects(response, '/user/')
class ManagerTest(TestCase):
"""
Test managers
"""
fixtures = [
'user.yaml',
'profile.yaml',
]
def setUp(self):
pass
def testCount(self):
p = Profile.objects.count()
self.assertEquals(p, 5)
def testFemaleCount(self):
p = Profile.objects.female_count()
self.assertEquals(p, 2)
def testMaleCount(self):
p = Profile.objects.male_count()
self.assertEquals(p, 3)
class ModelTest(TestCase):
"""
Test models
"""
fixtures = [
'profile.yaml',
'user.yaml',
]
def setUp(self):
pass
def testAbsoluteUrl(self):
p = Profile.objects.get(pk=1)
url = p.get_absolute_url()
self.assertEqual(url, "/user/profile/1/")
def testAge(self):
p = Profile.objects.get(pk=1)
age = (date.today() - p.dob).days / 365
self.assertEquals(p.age, age)
def testRepr(self):
p = Profile.objects.get(pk=1)
self.assertEqual(str(p), "Johann (M, 2)")
| mit | -1,977,304,415,432,862,200 | 21.246753 | 59 | 0.586106 | false |
wegamekinglc/alpha-mind | alphamind/benchmarks/portfolio/rankbuild.py | 1 | 3088 | # -*- coding: utf-8 -*-
"""
Created on 2017-4-27
@author: cheng.li
"""
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.portfolio.rankbuilder import rank_build
def benchmark_build_rank(n_samples: int, n_loops: int, n_included: int) -> None:
print("-" * 60)
print("Starting portfolio construction by rank benchmarking")
print("Parameters(n_samples: {0}, n_included: {1}, n_loops: {2})".format(n_samples, n_included,
n_loops))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = rank_build(x, n_included)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
exp_weights = np.zeros((len(x), n_portfolio))
choosed_index = (-x).argsort(axis=0).argsort(axis=0) < n_included
for j in range(n_portfolio):
exp_weights[choosed_index[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_build_rank_with_group(n_samples: int, n_loops: int, n_included: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting portfolio construction by rank with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, n_included: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_included,
n_loops,
n_groups))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = rank_build(x, n_included, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
grouped_ordering = pd.DataFrame(-x).groupby(groups).rank()
exp_weights = np.zeros((len(x), n_portfolio))
masks = (grouped_ordering <= n_included).values
for j in range(n_portfolio):
exp_weights[masks[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
if __name__ == '__main__':
benchmark_build_rank(3000, 1000, 300)
benchmark_build_rank_with_group(3000, 1000, 10, 30)
| mit | 4,565,990,175,895,864,300 | 34.761905 | 101 | 0.540479 | false |
ryanmiao/libvirt-test-API | repos/setVcpus/vcpupin_config.py | 1 | 3547 | #!/usr/bin/env python
# Test domain vcpu pin with flag VIR_DOMAIN_AFFECT_CONFIG, check
# domain config xml with vcpupin configuration.
import re
from xml.dom import minidom
import libvirt
from libvirt import libvirtError
from src import sharedmod
from utils import utils
required_params = ('guestname', 'vcpu', 'cpulist',)
optional_params = {}
def vcpupin_check(domobj, vcpu, cpumap):
"""check domain config xml with vcpupin element
"""
guestxml = domobj.XMLDesc(2)
logger.debug("domain %s xml :\n%s" %(domobj.name(), guestxml))
doc = minidom.parseString(guestxml)
vcpupin = doc.getElementsByTagName('vcpupin')
if not vcpupin:
logger.error("no vcpupin element in domain xml")
return 1
for i in range(len(vcpupin)):
if vcpupin[i].hasAttribute('vcpu') and \
vcpupin[i].hasAttribute('cpuset'):
vcpu_attr = vcpupin[i].getAttributeNode('vcpu')
cpu_attr = vcpupin[i].getAttributeNode('cpuset')
if int(vcpu_attr.nodeValue) == vcpu:
cpulist = cpu_attr.nodeValue
if cpulist == '':
cpumap_tmp = ()
for i in range(maxcpu):
cpumap_tmp += (False,)
else:
cpumap_tmp = utils.param_to_tuple(cpulist, maxcpu)
if cpumap_tmp == cpumap:
logger.info("cpuset is as expected in domain xml")
return 0
else:
logger.error("cpuset is not as expected in domain xml")
return 1
if i == len(vcpupin) - 1:
logger.error("the vcpupin element with given vcpu is not found")
return 1
def vcpupin_config(params):
"""pin domain vcpu to host cpu with config flag
"""
global logger
logger = params['logger']
params.pop('logger')
guestname = params['guestname']
vcpu = int(params['vcpu'])
cpulist = params['cpulist']
logger.info("the name of virtual machine is %s" % guestname)
logger.info("the given vcpu is %s" % vcpu)
logger.info("the given cpulist is %s" % cpulist)
global maxcpu
maxcpu = utils.get_host_cpus()
logger.info("%s physical cpu on host" % maxcpu)
conn = sharedmod.libvirtobj['conn']
try:
domobj = conn.lookupByName(guestname)
cpumap = utils.param_to_tuple(cpulist, maxcpu)
if not cpumap:
logger.error("cpulist: Invalid format")
return 1
logger.debug("cpumap for vcpu pin is:")
logger.debug(cpumap)
logger.info("pin domain vcpu %s to host cpulist %s with flag: %s" %
(vcpu, cpulist, libvirt.VIR_DOMAIN_AFFECT_CONFIG))
domobj.pinVcpuFlags(vcpu, cpumap, libvirt.VIR_DOMAIN_AFFECT_CONFIG)
logger.info("check vcpu pin info")
ret = domobj.vcpuPinInfo(libvirt.VIR_DOMAIN_AFFECT_CONFIG)
logger.debug("vcpu pin info is:")
logger.debug(ret)
if ret[vcpu] == cpumap:
logger.info("vcpu pin info is expected")
else:
logger.error("vcpu pin info is not expected")
return 1
except libvirtError, e:
logger.error("libvirt call failed: " + str(e))
return 1
logger.info("check domain vcpupin configuration in xml")
ret = vcpupin_check(domobj, vcpu, cpumap)
if ret:
logger.error("domain vcpu pin check failed")
return 1
else:
logger.info("domain vcpu pin check succeed")
return 0
| gpl-2.0 | -5,593,081,498,587,919,000 | 31.541284 | 76 | 0.594587 | false |
kubaszostak/gdal-dragndrop | osgeo/apps/Python27/Scripts/esri2wkt.py | 1 | 2143 | #!C:\OSGEO4~1\bin\python.exe
# ******************************************************************************
# $Id: esri2wkt.py 7464f4b11b93bb2d1098d1b962907228932bf8c1 2018-05-03 19:56:49 +1000 Ben Elliston $
#
# Project: GDAL
# Purpose: Simple command line program for translating ESRI .prj files
# into WKT.
# Author: Frank Warmerdam, [email protected]
#
# ******************************************************************************
# Copyright (c) 2000, Frank Warmerdam
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
from osgeo import osr
if len(sys.argv) < 2:
print('Usage: esri2wkt.py <esri .prj file>')
sys.exit(1)
prj_fd = open(sys.argv[1])
prj_lines = prj_fd.readlines()
prj_fd.close()
for i, prj_line in enumerate(prj_lines):
prj_lines[i] = prj_line.rstrip()
prj_srs = osr.SpatialReference()
err = prj_srs.ImportFromESRI(prj_lines)
if err != 0:
print('Error = %d' % err)
else:
print(prj_srs.ExportToPrettyWkt())
| mit | -157,707,794,943,707,520 | 38.433962 | 101 | 0.631358 | false |
OpenHumans/open-humans | public_data/models.py | 1 | 3781 | from collections import OrderedDict
from itertools import groupby
from django.db import models
from django.db.models import F
from common.fields import AutoOneToOneField
from open_humans.models import Member
from private_sharing.models import (
DataRequestProjectMember,
ProjectDataFile,
id_label_to_project,
)
def is_public(member, source):
"""
Return whether a given member has publicly shared the given source.
"""
project = id_label_to_project(source)
return bool(
member.public_data_participant.publicdataaccess_set.filter(
project_membership__project=project, is_public=True
)
)
def public_count(project):
"""
Get number of users publicly sharing a project's data.
"""
count = (
PublicDataAccess.objects.filter(
project_membership__project=project,
# Filter to only count members with datafiles for this project.
is_public=True,
project_membership__project__in=F(
"project_membership__member__user__datafiles__"
"parent_project_data_file__direct_sharing_project"
),
)
.distinct()
.count()
)
return count
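# --- Illustrative sketch (editorial addition, not part of the original module) ---
# How the two helpers above might be combined by calling code; "member", "project"
# and "source_label" stand in for real model instances / id labels supplied by the
# caller.
def _example_sharing_summary(member, project, source_label):
    return {
        "member_shares_publicly": is_public(member, source_label),
        "total_public_sharers": public_count(project),
    }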
class Participant(models.Model):
"""
Represents a participant in the Public Data Sharing study.
"""
member = AutoOneToOneField(
Member, related_name="public_data_participant", on_delete=models.CASCADE
)
enrolled = models.BooleanField(default=False)
def _files_for_project(self, project):
return ProjectDataFile.objects.filter(
user=self.member.user, direct_sharing_project=project
).exclude(completed=False)
@property
def public_data_w_vis_membership_by_proj(self):
vis_projs_w_public_data = [
pda.project_membership.project
for pda in self.publicdataaccess_set.filter(
is_public=True, project_membership__visible=True
)
]
files = self.member.user.datafiles.filter(
parent_project_data_file__direct_sharing_project__in=vis_projs_w_public_data
).order_by("parent_project_data_file__direct_sharing_project", "created")
grouped_by_project = groupby(
files, key=lambda x: x.parent_project_data_file.direct_sharing_project
)
files_by_project = OrderedDict()
for proj, files in grouped_by_project:
files_by_project[proj] = []
for file in files:
files_by_project[proj].append(file)
return files_by_project
def __str__(self):
status = "Enrolled" if self.enrolled else "Not enrolled"
return str("{0}:{1}").format(self.member, status)
class PublicDataAccess(models.Model):
"""
Keep track of public sharing for a data source.
The data source is the DataRequestProject identified by the project_membership.
"""
participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
project_membership = models.OneToOneField(
DataRequestProjectMember, on_delete=models.CASCADE
)
is_public = models.BooleanField(default=False)
def __str__(self):
status = "Private"
if self.is_public:
status = "Public"
return str("{0}:{1}:{2}").format(
self.participant.member.user.username,
self.project_membership.project.name,
status,
)
class WithdrawalFeedback(models.Model):
"""
Keep track of any feedback a study participant gives when they withdraw
from the study.
"""
member = models.ForeignKey(Member, on_delete=models.CASCADE)
feedback = models.TextField(blank=True)
withdrawal_date = models.DateTimeField(auto_now_add=True)
| mit | 3,227,372,887,171,134,000 | 29.491935 | 88 | 0.642687 | false |
bzcheeseman/pytorch-EMM | Examples/basic_controller.py | 1 | 15995 | #
# Created by Aman LaChapelle on 3/23/17.
#
# pytorch-EMM
# Copyright (c) 2017 Aman LaChapelle
# Full license at pytorch-EMM/LICENSE.txt
#
import torch
import torch.nn as nn
import torch.nn.functional as Funct
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
from Utils import num_flat_features
from EMM import EMM_NTM, EMM_GPU
from Utils import CopyTask
class FeedForwardController(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
batch_size,
num_reads=1,
memory_dims=(128, 20)):
super(FeedForwardController, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.batch_size = batch_size
self.memory_dims = memory_dims
self.in_to_hid = nn.Linear(self.num_inputs, self.num_hidden)
self.read_to_hid = nn.Linear(self.memory_dims[1]*num_reads, self.num_hidden)
def forward(self, x, read):
x = x.contiguous()
x = x.view(-1, num_flat_features(x))
read = read.contiguous()
read = read.view(-1, num_flat_features(read))
x = Funct.relu(self.in_to_hid(x)) + Funct.relu(self.read_to_hid(read))
return x
class GRUController(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
batch_size,
num_reads=1,
memory_dims=(128, 20)):
super(GRUController, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.batch_size = batch_size
self.memory_dims = memory_dims
self.gru = nn.GRUCell(
input_size=self.num_inputs,
hidden_size=self.num_hidden
)
self.read_to_in = nn.Linear(self.memory_dims[1]*num_reads, self.num_inputs)
def forward(self, x, read, h_t):
x = x.contiguous()
r = Funct.relu(self.read_to_in(read))
r = r.view(*x.size())
x = Funct.relu(x + r)
x = x.view(-1, num_flat_features(x))
h_tp1 = self.gru(x, h_t)
return h_tp1
class NTM(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
num_outputs,
batch_size,
num_reads,
memory_dims=(128, 20)):
super(NTM, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.num_outputs = num_outputs
self.batch_size = batch_size
self.num_reads = num_reads
self.memory_dims = memory_dims
self.EMM = EMM_NTM(self.num_hidden, self.batch_size, num_reads=self.num_reads,
num_shifts=3, memory_dims=self.memory_dims)
self.controller = GRUController(self.num_inputs, self.num_hidden, self.batch_size,
num_reads=self.num_reads, memory_dims=self.memory_dims)
self.hid_to_out = nn.Linear(self.num_hidden, self.num_outputs)
def init_hidden(self):
wr, ww, memory = self.EMM.init_weights_mem()
hidden = Variable(torch.zeros(self.batch_size, self.num_hidden))
return hidden, wr, ww, memory
def forward(self, x, h, wr, ww, m):
x = x.permute(1, 0, 2, 3)
def step(x_t, h_t, wr_t, ww_t, m_t):
r_t, wr_t, ww_t, m_t = self.EMM(h_t, wr_t, ww_t, m_t)
h_t = self.controller(x_t, r_t, h_t)
out = Funct.sigmoid(self.hid_to_out(h_t.view(-1, num_flat_features(h_t))))
return out, h_t, wr_t, ww_t, m_t
x_t = torch.unbind(x, 0)
out = []
for i in range(x.size()[0]):
o, h, wr, ww, m = step(x_t[i], h, wr, ww, m)
out.append(o)
outs = torch.stack(out, 1)
return outs, h, wr, ww, m
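# --- Illustrative sketch (editorial addition, not part of the original script) ---
# One forward pass of the NTM defined above on a dummy batch; the sizes below are
# assumptions chosen only for demonstration.
def _example_ntm_forward():
    ntm = NTM(num_inputs=8, num_hidden=100, num_outputs=8, batch_size=1, num_reads=1)
    h, wr, ww, m = ntm.init_hidden()
    x = Variable(torch.zeros(1, 5, 8, 1))  # (batch, seq_len, num_inputs, 1)
    outs, h, wr, ww, m = ntm(x, h, wr, ww, m)
    return outs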
class GPU_NTM(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
num_outputs,
batch_size,
mem_banks,
num_reads,
memory_dims=(32, 32)):
super(GPU_NTM, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.num_outputs = num_outputs
self.batch_size = batch_size
self.mem_banks = mem_banks
self.num_reads = num_reads
self.memory_dims = memory_dims
self.EMM = EMM_GPU(self.num_hidden, self.num_reads*self.memory_dims[1], self.batch_size,
memory_banks=self.mem_banks, memory_dims=self.memory_dims)
self.controller = GRUController(self.num_inputs, self.num_hidden, self.batch_size,
num_reads=self.num_reads, memory_dims=self.memory_dims)
self.hid_to_out = nn.Linear(self.num_hidden, self.num_outputs)
def init_hidden(self):
wr, ww, memory = self.EMM.init_weights_mem()
hidden = Variable(torch.zeros(self.batch_size, self.num_hidden), requires_grad=True)
return hidden, wr, ww, memory
def forward(self, x, h, wr, ww, m):
x = x.permute(1, 0, 2, 3)
def step(x_t, h_t, wr_t, ww_t, m_t):
r_tp1, m_tp1, wr_tp1, ww_tp1 = self.EMM(h_t, wr_t, ww_t, m_t) # update reads, memory
print(x_t, h_t)
h_tp1 = self.controller(x_t, r_tp1, h_t) # update hidden state - goes to nan whenever the input is zero
out = Funct.relu(self.hid_to_out(h_tp1)) # send out data
return out, h_tp1, wr_tp1, ww_tp1, m_tp1
x_t = torch.unbind(x, 0)
out = []
for i in range(x.size()[0]):
o, h_t, wr_t, ww_t, m_t = step(x_t[i], h, wr, ww, m)
# assert not torch.equal(h_t.data, h.data)
assert not torch.equal(wr_t.data, wr.data)
assert not torch.equal(ww_t.data, ww.data)
assert not torch.equal(m_t.data, m.data)
h = h_t
wr = wr_t
ww = ww_t
m = m_t
out.append(o)
outs = torch.stack(out, 1)
return outs, h, wr, ww, m
def train_gpu(batch, num_inputs, seq_len, num_hidden):
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
ntm = GPU_NTM(num_inputs, num_hidden, num_inputs, batch, num_reads=1, mem_banks=5)
try:
ntm.load_state_dict(torch.load("models/copy_seqlen_{}.dat".format(seq_len)))
except FileNotFoundError or AttributeError:
pass
ntm.train()
h, wr, ww, m = ntm.init_hidden()
criterion = nn.SmoothL1Loss()
max_seq_len = 20
current_lr = 1e-3
print_steps = 1000
optimizer = optim.Adam(ntm.parameters(), lr=current_lr)
for length in range(4, max_seq_len, 2):
current_lr = 1e-3
running_loss = 0.0
prev_running_loss = []
test = CopyTask(length, [num_inputs, 1], num_samples=3e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
for epoch in range(1):
for i, data in enumerate(data_loader, 0):
inputs, labels = data
inputs = Variable(inputs)
labels = Variable(labels)
ntm.zero_grad()
outputs, h, wr, ww, m = ntm(inputs, h, wr, ww, m)
if np.isnan(m.data[0, 0, 0]):
print(i)
raise NameError
h = Variable(h.data)
wr = Variable(wr.data)
ww = Variable(ww.data)
m = Variable(m.data)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.data[0]
if i % print_steps == print_steps-1:
print('[length: %d, epoch: %d, i: %5d] average loss: %.3f' % (length, epoch + 1, i + 1,
running_loss / print_steps))
plt.imshow(m[0].data.numpy())
plt.savefig("plots/ntm/{}_{}_{}_memory.png".format(length, epoch + 1, i + 1))
plt.close()
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plottable_true_output = torch.squeeze(labels.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_{}_input.png".format(length, epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_{}_net_output.png".format(length, epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_true_output)
plt.savefig("plots/ntm/{}_{}_{}_true_output.png".format(length, epoch + 1, i + 1))
plt.close()
# print("Previous average losses since lr decay: ", prev_running_loss)
prev_running_loss.append(running_loss / print_steps)
if len(prev_running_loss) > 2:
if np.abs(np.diff(prev_running_loss)).min() <= 0.001 \
and running_loss/print_steps < 1./len(prev_running_loss):
torch.save(ntm.state_dict(), "models/gpu_copy_seqlen_{}.dat".format(seq_len))
current_lr = max([current_lr * 1e-1, 1e-6])
print("lr decayed to: ", current_lr)
optimizer = optim.Adam(ntm.parameters(), lr=current_lr)
prev_running_loss.clear()
running_loss = 0.0
torch.save(ntm.state_dict(), "models/gpu_copy_seqlen_{}.dat".format(seq_len))
print("Finished Training")
test = CopyTask(5 * max_seq_len, [num_inputs - 1, 1], num_samples=1e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
total_loss = 0.0
for i, data in enumerate(data_loader, 0):
inputs, labels = data
inputs.volatile = True
inputs = Variable(inputs)
labels = Variable(labels)
outputs = ntm(inputs)
if i % 1000 / batch == (1000 / batch) - 1:
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_input_test.png".format(epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_net_output_test.png".format(epoch + 1, i + 1))
plt.close()
total_loss += len(data) * criterion(outputs, labels).data
print("Total Loss: {}".format(total_loss / len(data_loader)))
def train_ntm(batch, num_inputs, seq_len, num_hidden):
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
ntm = NTM(num_inputs, num_hidden, num_inputs, batch, num_reads=1)
h, wr, ww, m = ntm.init_hidden()
try:
ntm.load_state_dict(torch.load("models/copy_seqlen_{}.dat".format(seq_len)))
except FileNotFoundError or AttributeError:
pass
ntm.train()
state = ntm.state_dict()
criterion = nn.L1Loss()
current_lr = 1e-3
print_steps = 1000
optimizer = optim.Adam(ntm.parameters(), lr=current_lr, weight_decay=0.00001)
max_seq_len = 20
for length in range(4, max_seq_len):
current_lr = 1e-3
running_loss = 0.0
prev_running_loss = []
test = CopyTask(length, [num_inputs, 1], num_samples=2e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
for epoch in range(5):
for i, data in enumerate(data_loader, 0):
inputs, labels = data
inputs = Variable(inputs)
labels = Variable(labels)
ntm.zero_grad()
outputs, h, wr, ww, m = ntm(inputs, h, wr, ww, m)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
h = Variable(h.data)
wr = Variable(wr.data)
ww = Variable(ww.data)
m = Variable(m.data)
running_loss += loss.data[0]
if i % print_steps == print_steps-1:
print('[length: %d, epoch: %d, i: %5d] average loss: %.3f' % (length, epoch + 1, i + 1,
running_loss / print_steps))
plt.imshow(wr.squeeze(0).data.numpy())
plt.savefig("plots/ntm/{}_{}_{}_read.png".format(length, epoch+1, i + 1))
plt.close()
plt.imshow(m.squeeze().data.numpy().T)
plt.savefig("plots/ntm/{}_{}_{}_memory.png".format(length, epoch + 1, i + 1))
plt.close()
plt.imshow(ww.data.numpy())
plt.savefig("plots/ntm/{}_{}_{}_write.png".format(length, epoch + 1, i + 1))
plt.close()
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plottable_true_output = torch.squeeze(labels.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_{}_input.png".format(length, epoch+1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_{}_net_output.png".format(length, epoch+1, i + 1))
plt.close()
plt.imshow(plottable_true_output)
plt.savefig("plots/ntm/{}_{}_{}_true_output.png".format(length, epoch+1, i + 1))
plt.close()
prev_running_loss.append(running_loss / print_steps)
if len(prev_running_loss) > 2:
if np.abs(np.diff(prev_running_loss)).min() <= 0.001 \
and running_loss / print_steps < 1. / len(prev_running_loss):
torch.save(ntm.state_dict(), "models/gpu_copy_seqlen_{}.dat".format(seq_len))
current_lr = max([current_lr * 1e-1, 1e-6])
print("lr decayed to: ", current_lr)
optimizer = optim.Adam(ntm.parameters(), lr=current_lr)
prev_running_loss.clear()
running_loss = 0.0
torch.save(ntm.state_dict(), "models/copy_seqlen_{}.dat".format(seq_len))
print("Finished Training")
test = CopyTask(5 * max_seq_len, [num_inputs-1, 1], num_samples=1e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
total_loss = 0.0
for i, data in enumerate(data_loader, 0):
inputs, labels = data
inputs.volatile = True
inputs = Variable(inputs)
labels = Variable(labels)
outputs = ntm(inputs)
if i % 1000/batch == (1000/batch)-1:
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_input_test.png".format(epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_net_output_test.png".format(epoch + 1, i + 1))
plt.close()
total_loss += len(data) * criterion(outputs, labels).data
print("Total Loss: {}".format(total_loss / len(data_loader)))
if __name__ == '__main__':
# train_ntm(1, 8, 5, 100)
train_gpu(1, 8, 5, 100)
| gpl-3.0 | -1,579,938,047,493,311,000 | 34.153846 | 117 | 0.519662 | false |
decabyte/analog_sensors_board | scripts/analog_sensors.py | 1 | 4417 | # analog_sensors.py
# author: Valerio De Carolis <[email protected]>
# date: 2013-10-30
# license: MIT
import sys
import os
import time
import signal
import serial
from serial import Serial, SerialException
# default serial configuration
DEFAULT_CONF = {
'port': '/dev/ttyACM3',
'baudrate': 57600,
'bytesize': serial.EIGHTBITS,
'parity': serial.PARITY_NONE,
'stopbits': serial.STOPBITS_ONE,
'timeout': 5
}
class AnalogSensorsClient:
def __init__(self):
# battery
self.batt0 = 0
self.batt1 = 0
self.batt2 = 0
self.batt3 = 0
self.raw_batt0 = 0
self.raw_batt1 = 0
self.raw_batt2 = 0
self.raw_batt3 = 0
# temperature
self.temp0 = 0
self.temp1 = 0
self.temp2 = 0
self.temp3 = 0
self.raw_temp0 = 0
self.raw_temp1 = 0
self.raw_temp2 = 0
self.raw_temp3 = 0
# pressure
self.bmp_temperature = 0
self.bmp_pressure = 0
self.bmp_ut = 0
self.bmp_up = 0
self.bmp_dirty = 0
# humidity
self.humidity = 0
self.raw_humidity = 0
# timestamps
self.timestamp = 0
# protocol parsers
self.GRAMMAR = {
'BMPCAL': self.parse_bmpcal,
'BAT': self.parse_battery,
'TEMP': self.parse_temperature,
'HIH': self.parse_humidity,
'BMP': self.parse_pressure,
'TIME': self.parse_timestamp
}
def print_status(self):
print('BATTERY VOLTAGES: {}V {}V {}V {}V'.format(
self.batt0, self.batt1, self.batt2, self.batt3))
print('VEHICLE TEMPERATURES: {}C {}C {}C {}C'.format(
self.temp0, self.temp1, self.temp2, self.temp3))
print('VEHICLE ENVIRONMENT: {}C {}Pa {}RH%\n'.format(
self.bmp_temperature, self.bmp_pressure, self.humidity))
def parse_message(self, msg):
'''
An example serial message:
$TEMP,122.10,123.10,123.10,127.85,488,492,492,511
'''
# parse serial message
items = msg.split(',')
# look for identifier
if items[0][0] is not '$':
return
# extract message type
msg_type = items[0][1:]
# check message type
try:
parser = self.GRAMMAR[msg_type]
parser(items)
except KeyError as ke:
print('[WARN]: message not recognized! bad format?')
def parse_battery(self, field):
# battery voltages
self.batt0 = float(field[1])
self.batt1 = float(field[2])
self.batt2 = float(field[3])
self.batt3 = float(field[4])
# raw analog readings
self.raw_batt0 = int(field[5])
self.raw_batt1 = int(field[6])
self.raw_batt2 = int(field[7])
self.raw_batt3 = int(field[8])
def parse_bmpcal(self, field):
pass
def parse_temperature(self, field):
# temperature
self.temp0 = float(field[1])
self.temp1 = float(field[2])
self.temp2 = float(field[3])
self.temp3 = float(field[4])
# raw analog readings
self.raw_temp0 = int(field[5])
self.raw_temp1 = int(field[6])
self.raw_temp2 = int(field[7])
self.raw_temp3 = int(field[8])
def parse_humidity(self, field):
self.humidity = float(field[1])
self.raw_humidity = int(field[2])
def parse_pressure(self, field):
self.bmp_temperature = float(field[1])
self.bmp_pressure = float(field[2])
self.bmp_ut = int(field[3])
self.bmp_up = int(field[4])
self.bmp_dirty = int(field[5])
def parse_timestamp(self, field):
self.timestamp = int(field[1])
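# --- Illustrative sketch (editorial addition, not part of the original script) ---
# Feeding a couple of hand-made frames through the parser; the sample values are
# invented for demonstration only.
def _example_parse():
    client = AnalogSensorsClient()
    client.parse_message('$BAT,12.40,12.35,12.50,12.20,680,678,684,676')
    client.parse_message('$TEMP,22.10,23.10,23.10,27.85,488,492,492,511')
    client.print_status()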
def main():
    # control flags (kept in a dict so the signal handler can update them;
    # assigning to a plain local inside handler() would only rebind a new
    # local variable and never stop the loop below)
    flags = {'running': True}
    connected = False
    sconn = None
    # signal handler
    def handler(signum, frame):
        flags['running'] = False
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
# analog client
client = AnalogSensorsClient()
# connection main loop
    while flags['running']:
try:
sconn = Serial(**DEFAULT_CONF)
except ValueError as ve:
print('[FATAL]: bad port configuration!')
sys.exit(-1)
except SerialException as se:
connected = False
print('[ERROR]: device not found, waiting for device ...')
# wait a little before trying to reconnect
time.sleep(5)
continue
else:
connected = True
# data processing loop
while connected:
try:
line = sconn.readline()
except SerialException as se:
connected = False
print('[ERROR]: connection lost!')
break
if len(line) != 0:
msg = line.strip() # remove any return carriage
client.parse_message(msg) # digest the message
# display status
client.print_status()
# release the serial connection
if sconn.isOpen():
sconn.close()
# close the connection if hang
if sconn is not None and sconn.isOpen():
sconn.close()
sys.exit(0)
if __name__ == '__main__':
main()
| mit | -6,909,965,110,469,711,000 | 19.737089 | 61 | 0.659724 | false |
hellowebapp/hellowebapp-ic-code | collection/models.py | 1 | 2080 | from __future__ import unicode_literals
from PIL import Image
from django.contrib.auth.models import User
from django.db import models
class Timestamp(models.Model):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Thing(Timestamp):
name = models.CharField(max_length=255)
description = models.TextField()
slug = models.SlugField(unique=True)
user = models.OneToOneField(User, blank=True, null=True)
upgraded = models.BooleanField(default=False)
stripe_id = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return "/things/%s/" % self.slug
class Social(Timestamp):
SOCIAL_TYPES = (
('twitter', 'Twitter'),
('facebook', 'Facebook'),
('pinterest', 'Pinterest'),
('instagram', 'Instagram'),
)
network = models.CharField(max_length=255, choices=SOCIAL_TYPES)
username = models.CharField(max_length=255)
thing = models.ForeignKey(Thing,
        on_delete=models.CASCADE, related_name="social_accounts")
# where we're overriding the admin name
class Meta:
verbose_name_plural = "Social media links"
# our helper, add above the new model
def get_image_path(instance, filename):
return '/'.join(['thing_images', instance.thing.slug, filename])
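# Illustrative note (editorial addition): for an Upload whose Thing has the slug
# "my-thing" and an uploaded file named "photo.jpg", the helper above returns
# "thing_images/my-thing/photo.jpg".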
class Upload(models.Model):
thing = models.ForeignKey(Thing,
on_delete=models.CASCADE, related_name="uploads")
image = models.ImageField(upload_to=get_image_path)
# add this bit in after our model
def save(self, *args, **kwargs):
# this is required when you override save functions
super(Upload, self).save(*args, **kwargs)
# our new code
if self.image:
image = Image.open(self.image)
i_width, i_height = image.size
max_size = (1000,1000)
if i_width > 1000:
image.thumbnail(max_size, Image.ANTIALIAS)
image.save(self.image.path)
| mit | -5,621,587,585,220,553,000 | 28.714286 | 68 | 0.650481 | false |
sannecottaar/burnman | burnman/material.py | 1 | 20844 | from __future__ import print_function
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
import numpy as np
def material_property(func):
"""
Decorator @material_property to be used for cached properties of materials.
To be used on function in Material or derived classes that should be exposed
as read-only properties that are cached. The function Material.reset() will
reset the cached values.
Internally, the values are stored in a dictionary member called _cached, which
is emptied by .reset().
"""
class mat_obj():
def __init__(self, func):
self.func = func
self.varname = self.func.__name__
def get(self, obj):
if not hasattr(obj, "_cached"):
raise Exception("The material_property decorator could not find class member _cached. "
"Did you forget to call Material.__init__(self) in __init___?")
cache_array = getattr(obj, "_cached")
if self.varname not in cache_array:
cache_array[self.varname] = self.func(obj)
return cache_array[self.varname]
return property(mat_obj(func).get, doc=func.__doc__)
class Material(object):
"""
Base class for all materials. The main functionality is unroll() which
returns a list of objects of type :class:`~burnman.mineral.Mineral` and their molar
fractions. This class is available as ``burnman.Material``.
The user needs to call set_method() (once in the beginning) and set_state()
before querying the material with unroll() or density().
"""
def __init__(self):
self._pressure = None
self._temperature = None
if not hasattr(self, "name"):
# if a derived class decides to set .name before calling this
# constructor (I am looking at you, SLB_2011.py!), do not
# overwrite the name here.
self._name = self.__class__.__name__
self._cached = {}
@property
def name(self):
""" Human-readable name of this material.
By default this will return the name of the class, but it can be set
        to an arbitrary string. Overridden in Mineral.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
def set_method(self, method):
"""
Set the averaging method. See :doc:`averaging` for details.
Notes
-----
Needs to be implemented in derived classes.
"""
raise NotImplementedError(
"need to implement set_method() in derived class!")
def to_string(self):
"""
Returns a human-readable name of this material. The default implementation will return the name of the class,
which is a reasonable default.
Returns
-------
name : string
Name of this material.
"""
return "'" + self.name + "'"
def debug_print(self, indent=""):
"""
Print a human-readable representation of this Material.
"""
raise NotImplementedError(
"Derived classes need to implement debug_print(). This is '" + self.__class__.__name__ + "'")
def print_minerals_of_current_state(self):
"""
Print a human-readable representation of this Material at the current P, T as a list of minerals.
This requires set_state() has been called before.
"""
(minerals, fractions) = self.unroll()
if len(minerals) == 1:
print(minerals[0].to_string())
else:
print("Material %s:" % self.to_string())
for (mineral, fraction) in zip(minerals, fractions):
print(" %g of phase %s" % (fraction, mineral.to_string()))
def set_state(self, pressure, temperature):
"""
Set the material to the given pressure and temperature.
Parameters
----------
pressure : float
The desired pressure in [Pa].
temperature : float
The desired temperature in [K].
"""
if not hasattr(self, "_pressure"):
raise Exception("Material.set_state() could not find class member _pressure. "
"Did you forget to call Material.__init__(self) in __init___?")
self.reset()
self._pressure = pressure
self._temperature = temperature
def reset(self):
"""
Resets all cached material properties.
It is typically not required for the user to call this function.
"""
self._cached = {}
def unroll(self):
"""
Unroll this material into a list of :class:`burnman.Mineral` and their molar fractions. All averaging schemes
then operate on this list of minerals. Note that the return value of this function may depend on the current
state (temperature, pressure).
Notes
-----
Needs to be implemented in derived classes.
Returns
-------
fractions : list of float
List of molar fractions, should sum to 1.0.
minerals : list of :class:`burnman.Mineral`
List of minerals.
"""
raise NotImplementedError(
"need to implement unroll() in derived class!")
def evaluate(self, vars_list, pressures, temperatures):
"""
Returns an array of material properties requested through a list of strings at given pressure and temperature
conditions. At the end it resets the set_state to the original values.
The user needs to call set_method() before.
Parameters
----------
vars_list : list of strings
Variables to be returned for given conditions
pressures : ndlist or ndarray of float
n-dimensional array of pressures in [Pa].
temperatures : ndlist or ndarray of float
n-dimensional array of temperatures in [K].
Returns
-------
output : array of array of float
Array returning all variables at given pressure/temperature values. output[i][j] is property vars_list[j]
and temperatures[i] and pressures[i].
"""
old_pressure = self.pressure
old_temperature = self.temperature
pressures = np.array(pressures)
temperatures = np.array(temperatures)
assert(pressures.shape == temperatures.shape)
output = np.empty((len(vars_list),) + pressures.shape)
for i, p in np.ndenumerate(pressures):
self.set_state(p, temperatures[i])
for j in range(len(vars_list)):
output[(j,) + i] = getattr(self, vars_list[j])
if old_pressure is None or old_temperature is None:
# do not set_state if old values were None. Just reset to None
# manually
self._pressure = self._temperature = None
self.reset()
else:
self.set_state(old_pressure, old_temperature)
return output
@property
def pressure(self):
"""
Returns current pressure that was set with :func:`~burnman.material.Material.set_state`.
Notes
-----
- Aliased with :func:`~burnman.material.Material.P`.
Returns
-------
pressure : float
Pressure in [Pa].
"""
return self._pressure
@property
def temperature(self):
"""
Returns current temperature that was set with :func:`~burnman.material.Material.set_state`.
Notes
-----
- Aliased with :func:`~burnman.material.Material.T`.
Returns
-------
temperature : float
Temperature in [K].
"""
return self._temperature
@material_property
def internal_energy(self):
"""
Returns the internal energy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.energy`.
Returns
-------
internal_energy : float
The internal energy in [J].
"""
raise NotImplementedError(
"need to implement internal_energy() in derived class!")
@material_property
def molar_gibbs(self):
"""
Returns the Gibbs free energy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.gibbs`.
Returns
-------
molar_gibbs : float
Gibbs free energy in [J].
"""
raise NotImplementedError(
"need to implement molar_gibbs() in derived class!")
@material_property
def molar_helmholtz(self):
"""
Returns the Helmholtz free energy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.helmholtz`.
Returns
-------
molar_helmholtz : float
Helmholtz free energy in [J].
"""
raise NotImplementedError(
"need to implement molar_helmholtz() in derived class!")
@material_property
def molar_mass(self):
"""
Returns molar mass of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
Returns
-------
molar_mass : float
Molar mass in [kg/mol].
"""
raise NotImplementedError(
"need to implement molar_mass() in derived class!")
@material_property
def molar_volume(self):
"""
Returns molar volume of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.V`.
Returns
-------
molar_volume : float
Molar volume in [m^3/mol].
"""
raise NotImplementedError(
"need to implement molar_volume() in derived class!")
@material_property
def density(self):
"""
Returns the density of this material.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.rho`.
Returns
-------
density : float
The density of this material in [kg/m^3].
"""
raise NotImplementedError(
"need to implement density() in derived class!")
@material_property
def molar_entropy(self):
"""
Returns entropy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.S`.
Returns
-------
entropy : float
Entropy in [J].
"""
raise NotImplementedError(
"need to implement molar_entropy() in derived class!")
@material_property
def molar_enthalpy(self):
"""
Returns enthalpy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.H`.
Returns
-------
enthalpy : float
Enthalpy in [J].
"""
raise NotImplementedError(
"need to implement molar_enthalpy() in derived class!")
@material_property
def isothermal_bulk_modulus(self):
"""
Returns isothermal bulk modulus of the material.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.K_T`.
Returns
-------
isothermal_bulk_modulus : float
Bulk modulus in [Pa].
"""
raise NotImplementedError(
"need to implement isothermal_bulk_moduls() in derived class!")
@material_property
def adiabatic_bulk_modulus(self):
"""
Returns the adiabatic bulk modulus of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.K_S`.
Returns
-------
adiabatic_bulk_modulus : float
Adiabatic bulk modulus in [Pa].
"""
raise NotImplementedError(
"need to implement adiabatic_bulk_modulus() in derived class!")
@material_property
def isothermal_compressibility(self):
"""
Returns isothermal compressibility of the mineral (or inverse isothermal bulk modulus).
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.beta_T`.
Returns
-------
(K_T)^-1 : float
Compressibility in [1/Pa].
"""
raise NotImplementedError(
"need to implement compressibility() in derived class!")
@material_property
def adiabatic_compressibility(self):
"""
Returns adiabatic compressibility of the mineral (or inverse adiabatic bulk modulus).
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.beta_S`.
Returns
-------
adiabatic_compressibility : float
adiabatic compressibility in [1/Pa].
"""
raise NotImplementedError(
"need to implement compressibility() in derived class!")
@material_property
def shear_modulus(self):
"""
Returns shear modulus of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
        - Aliased with :func:`~burnman.material.Material.G`.
Returns
-------
shear_modulus : float
Shear modulus in [Pa].
"""
raise NotImplementedError(
"need to implement shear_modulus() in derived class!")
@material_property
def p_wave_velocity(self):
"""
Returns P wave speed of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.v_p`.
Returns
-------
p_wave_velocity : float
P wave speed in [m/s].
"""
raise NotImplementedError(
"need to implement p_wave_velocity() in derived class!")
@material_property
def bulk_sound_velocity(self):
"""
Returns bulk sound speed of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.v_phi`.
Returns
-------
bulk sound velocity: float
Sound velocity in [m/s].
"""
raise NotImplementedError(
"need to implement bulk_sound_velocity() in derived class!")
@material_property
def shear_wave_velocity(self):
"""
Returns shear wave speed of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.v_s`.
Returns
-------
shear_wave_velocity : float
Wave speed in [m/s].
"""
raise NotImplementedError(
"need to implement shear_wave_velocity() in derived class!")
@material_property
def grueneisen_parameter(self):
"""
Returns the grueneisen parameter of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.gr`.
Returns
-------
gr : float
Grueneisen parameters [unitless].
"""
raise NotImplementedError(
"need to implement grueneisen_parameter() in derived class!")
@material_property
def thermal_expansivity(self):
"""
Returns thermal expansion coefficient of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.alpha`.
Returns
-------
alpha : float
Thermal expansivity in [1/K].
"""
raise NotImplementedError(
"need to implement thermal_expansivity() in derived class!")
@material_property
def heat_capacity_v(self):
"""
Returns heat capacity at constant volume of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.C_v`.
Returns
-------
heat_capacity_v : float
Heat capacity in [J/K/mol].
"""
raise NotImplementedError(
"need to implement heat_capacity_v() in derived class!")
@material_property
def heat_capacity_p(self):
"""
Returns heat capacity at constant pressure of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.C_p`.
Returns
-------
heat_capacity_p : float
Heat capacity in [J/K/mol].
"""
raise NotImplementedError(
"need to implement heat_capacity_p() in derived class!")
#
# Aliased properties
@property
def P(self):
"""Alias for :func:`~burnman.material.Material.pressure`"""
return self.pressure
@property
def T(self):
"""Alias for :func:`~burnman.material.Material.temperature`"""
return self.temperature
@property
def energy(self):
"""Alias for :func:`~burnman.material.Material.internal_energy`"""
return self.internal_energy
@property
def helmholtz(self):
"""Alias for :func:`~burnman.material.Material.molar_helmholtz`"""
return self.molar_helmholtz
@property
def gibbs(self):
"""Alias for :func:`~burnman.material.Material.molar_gibbs`"""
return self.molar_gibbs
@property
def V(self):
"""Alias for :func:`~burnman.material.Material.molar_volume`"""
return self.molar_volume
@property
def rho(self):
"""Alias for :func:`~burnman.material.Material.density`"""
return self.density
@property
def S(self):
"""Alias for :func:`~burnman.material.Material.molar_entropy`"""
return self.molar_entropy
@property
def H(self):
"""Alias for :func:`~burnman.material.Material.molar_enthalpy`"""
return self.molar_enthalpy
@property
def K_T(self):
"""Alias for :func:`~burnman.material.Material.isothermal_bulk_modulus`"""
return self.isothermal_bulk_modulus
@property
def K_S(self):
"""Alias for :func:`~burnman.material.Material.adiabatic_bulk_modulus`"""
return self.adiabatic_bulk_modulus
@property
def beta_T(self):
"""Alias for :func:`~burnman.material.Material.isothermal_compressibility`"""
return self.isothermal_compressibility
@property
def beta_S(self):
"""Alias for :func:`~burnman.material.Material.adiabatic_compressibility`"""
return self.adiabatic_compressibility
@property
def G(self):
"""Alias for :func:`~burnman.material.Material.shear_modulus`"""
return self.shear_modulus
@property
def v_p(self):
"""Alias for :func:`~burnman.material.Material.p_wave_velocity`"""
return self.p_wave_velocity
@property
def v_phi(self):
"""Alias for :func:`~burnman.material.Material.bulk_sound_velocity`"""
return self.bulk_sound_velocity
@property
def v_s(self):
"""Alias for :func:`~burnman.material.Material.shear_wave_velocity`"""
return self.shear_wave_velocity
@property
def gr(self):
"""Alias for :func:`~burnman.material.Material.grueneisen_parameter`"""
return self.grueneisen_parameter
@property
def alpha(self):
"""Alias for :func:`~burnman.material.Material.thermal_expansivity`"""
return self.thermal_expansivity
@property
def C_v(self):
"""Alias for :func:`~burnman.material.Material.heat_capacity_v`"""
return self.heat_capacity_v
@property
def C_p(self):
"""Alias for :func:`~burnman.material.Material.heat_capacity_p`"""
return self.heat_capacity_p
| gpl-2.0 | -7,016,233,993,997,625,000 | 28.607955 | 117 | 0.576473 | false |
hhursev/recipe-scraper | tests/test_marthastewart.py | 1 | 2773 | from recipe_scrapers.marthastewart import MarthaStewart
from tests import ScraperTest
class TestMarthaStewart(ScraperTest):
scraper_class = MarthaStewart
maxDiff = None
def test_host(self):
self.assertEqual("marthastewart.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.marthastewart.com/336792/breaded-chicken-breasts",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Breaded Chicken Breasts")
def test_total_time(self):
self.assertEqual(25, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("4", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://imagesvc.meredithcorp.io/v3/mm/image?url=https%3A%2F%2Fassets.marthastewart.com%2Fstyles%2Fwmax-750%2Fd31%2Fbreaded-chicken-cutlets-d104370%2Fbreaded-chicken-cutlets-d104370_horiz.jpg%3Fitok%3DdnK5TccB",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertSetEqual(
set(
[
"3 large eggs",
"Coarse salt",
"1/3 cup all-purpose flour",
"3 1/2 cups fresh breadcrumbs",
"1 cup vegetable oil",
"8 thin chicken cutlets (about 1 1/2 pounds total)",
"Lemon wedges, for serving (optional)",
]
),
set(self.harvester_class.ingredients()),
)
def test_instructions(self):
return self.assertEqual(
"\n".join(
[
"In a shallow dish, whisk eggs with teaspoon salt; let stand 5 minutes. In another shallow dish, season flour with 1/4 teaspoon salt. In a third shallow dish, season breadcrumbs with 1 teaspoon salt.",
"In a large cast-iron skillet or other heavy deep skillet, heat oil over medium. Meanwhile, pat chicken dry with paper towels. Coat in flour, shaking off excess, then dip in egg (letting excess drip off). Dredge in breadcrumbs, turning twice and patting to adhere.",
"Increase heat to medium-high. Working in batches, add chicken to skillet; cook, gently shaking skillet occasionally, until chicken is browned, about 4 minutes. Turn with tongs; cook until browned and opaque throughout, 2 to 3 minutes more (if browning too quickly, lower heat). Between batches, skim off brown crumbs from oil with a slotted spoon. Drain chicken on paper towels; season with salt.",
]
),
self.harvester_class.instructions(),
)
| mit | 824,724,842,242,292,500 | 44.459016 | 419 | 0.62784 | false |
j-carl/ansible | lib/ansible/module_utils/facts/system/distribution.py | 1 | 31164 | # -*- coding: utf-8 -*-
# Copyright: (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
from ansible.module_utils.common.sys_info import get_distribution, get_distribution_version, \
get_distribution_codename
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
def get_uname(module, flags=('-v')):
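    # Run `uname` with the given flags (a string is split on whitespace, any other
    # sequence is passed through) and return its stdout, or None if the command fails.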
if isinstance(flags, str):
flags = flags.split()
command = ['uname']
command.extend(flags)
rc, out, err = module.run_command(command)
if rc == 0:
return out
return None
def _file_exists(path, allow_empty=False):
# not finding the file, exit early
if not os.path.exists(path):
return False
# if just the path needs to exists (ie, it can be empty) we are done
if allow_empty:
return True
    # file exists but is empty and we don't allow_empty
if os.path.getsize(path) == 0:
return False
# file exists with some content
return True
class DistributionFiles:
'''has-a various distro file parsers (os-release, etc) and logic for finding the right one.'''
    # every distribution name mentioned here must have one of
    # - allowempty == True
    # - be listed in SEARCH_STRING
    # - have a function parse_distribution_file_DISTNAME implemented
    # keep names in sync with Conditionals page of docs
OSDIST_LIST = (
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'Archlinux'},
{'path': '/etc/os-release', 'name': 'SUSE'},
{'path': '/etc/SuSE-release', 'name': 'SUSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
{'path': '/etc/os-release', 'name': 'NA'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT',
'SMGL': 'Source Mage GNU/Linux',
}
# We can't include this in SEARCH_STRING because a name match on its keys
# causes a fallback to using the first whitespace separated item from the file content
# as the name. For os-release, that is in form 'NAME=Arch'
OS_RELEASE_ALIAS = {
'Archlinux': 'Arch Linux'
}
STRIP_QUOTES = r'\'\"\\'
def __init__(self, module):
self.module = module
def _get_file_content(self, path):
return get_file_content(path)
def _get_dist_file_content(self, path, allow_empty=False):
        # can't find that dist file, or it is incorrectly empty
if not _file_exists(path, allow_empty=allow_empty):
return False, None
data = self._get_file_content(path)
return True, data
def _parse_dist_file(self, name, dist_file_content, path, collected_facts):
dist_file_dict = {}
dist_file_content = dist_file_content.strip(DistributionFiles.STRIP_QUOTES)
if name in self.SEARCH_STRING:
# look for the distribution string in the data and replace according to RELEASE_NAME_MAP
# only the distribution name is set, the version is assumed to be correct from distro.linux_distribution()
if self.SEARCH_STRING[name] in dist_file_content:
# this sets distribution=RedHat if 'Red Hat' shows up in data
dist_file_dict['distribution'] = name
dist_file_dict['distribution_file_search_string'] = self.SEARCH_STRING[name]
else:
# this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
dist_file_dict['distribution'] = dist_file_content.split()[0]
return True, dist_file_dict
if name in self.OS_RELEASE_ALIAS:
if self.OS_RELEASE_ALIAS[name] in dist_file_content:
dist_file_dict['distribution'] = name
return True, dist_file_dict
return False, dist_file_dict
# call a dedicated function for parsing the file content
# TODO: replace with a map or a class
try:
            # FIXME: most of these don't actually look at the dist file contents, but random other stuff
distfunc_name = 'parse_distribution_file_' + name
distfunc = getattr(self, distfunc_name)
parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
return parsed, dist_file_dict
except AttributeError as exc:
self.module.debug('exc: %s' % exc)
            # this should never happen, but if it does, fail quietly rather than with a traceback
return False, dist_file_dict
return True, dist_file_dict
# to debug multiple matching release files, one can use:
# self.facts['distribution_debug'].append({path + ' ' + name:
# (parsed,
# self.facts['distribution'],
# self.facts['distribution_version'],
# self.facts['distribution_release'],
# )})
def _guess_distribution(self):
# try to find out which linux distribution this is
dist = (get_distribution(), get_distribution_version(), get_distribution_codename())
distribution_guess = {
'distribution': dist[0] or 'NA',
'distribution_version': dist[1] or 'NA',
# distribution_release can be the empty string
'distribution_release': 'NA' if dist[2] is None else dist[2]
}
distribution_guess['distribution_major_version'] = distribution_guess['distribution_version'].split('.')[0] or 'NA'
return distribution_guess
def process_dist_files(self):
# Try to handle the exceptions now ...
# self.facts['distribution_debug'] = []
dist_file_facts = {}
dist_guess = self._guess_distribution()
dist_file_facts.update(dist_guess)
for ddict in self.OSDIST_LIST:
name = ddict['name']
path = ddict['path']
allow_empty = ddict.get('allowempty', False)
has_dist_file, dist_file_content = self._get_dist_file_content(path, allow_empty=allow_empty)
            # the file may be empty, but allow_empty is set; for example, ArchLinux ships an
            # empty /etc/arch-release alongside an /etc/os-release with a different name
if has_dist_file and allow_empty:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
dist_file_facts['distribution_file_variety'] = name
break
if not has_dist_file:
# keep looking
continue
parsed_dist_file, parsed_dist_file_facts = self._parse_dist_file(name, dist_file_content, path, dist_file_facts)
# finally found the right os dist file and were able to parse it
if parsed_dist_file:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
# distribution and file_variety are the same here, but distribution
# will be changed/mapped to a more specific name.
# ie, dist=Fedora, file_variety=RedHat
dist_file_facts['distribution_file_variety'] = name
dist_file_facts['distribution_file_parsed'] = parsed_dist_file
dist_file_facts.update(parsed_dist_file_facts)
break
return dist_file_facts
# TODO: FIXME: split distro file parsing into its own module or class
def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
slackware_facts = {}
if 'Slackware' not in data:
return False, slackware_facts # TODO: remove
slackware_facts['distribution'] = name
version = re.findall(r'\w+[.]\w+\+?', data)
if version:
slackware_facts['distribution_version'] = version[0]
return True, slackware_facts
def parse_distribution_file_Amazon(self, name, data, path, collected_facts):
amazon_facts = {}
if 'Amazon' not in data:
return False, amazon_facts
amazon_facts['distribution'] = 'Amazon'
version = [n for n in data.split() if n.isdigit()]
version = version[0] if version else 'NA'
amazon_facts['distribution_version'] = version
return True, amazon_facts
def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
openwrt_facts = {}
if 'OpenWrt' not in data:
return False, openwrt_facts # TODO: remove
openwrt_facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
openwrt_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
openwrt_facts['distribution_release'] = release.groups()[0]
return True, openwrt_facts
def parse_distribution_file_Alpine(self, name, data, path, collected_facts):
alpine_facts = {}
alpine_facts['distribution'] = 'Alpine'
alpine_facts['distribution_version'] = data
return True, alpine_facts
def parse_distribution_file_SUSE(self, name, data, path, collected_facts):
suse_facts = {}
if 'suse' not in data.lower():
return False, suse_facts # TODO: remove if tested without this
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
suse_facts['distribution'] = distribution.group(1).strip('"')
                # example version patterns: 13.04, 13.0, 13
distribution_version = re.search(r'^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
suse_facts['distribution_version'] = distribution_version.group(1)
suse_facts['distribution_major_version'] = distribution_version.group(1).split('.')[0]
if 'open' in data.lower():
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release:
suse_facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                    # SLES doesn't have funny release names
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release.group(1):
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
suse_facts['distribution_release'] = release
# Starting with SLES4SAP12 SP3 NAME reports 'SLES' instead of 'SLES_SAP'
# According to SuSe Support (SR101182877871) we should use the CPE_NAME to detect SLES4SAP
if re.search("^CPE_NAME=.*sles_sap.*$", line):
suse_facts['distribution'] = 'SLES_SAP'
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).splitlines()[0]
suse_facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
suse_facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
suse_facts['distribution'] = "SLES"
elif "Desktop" in data:
suse_facts['distribution'] = "SLED"
for line in lines:
                    release = re.search('PATCHLEVEL = ([0-9]+)', line)  # SLES doesn't have funny release names
if release:
suse_facts['distribution_release'] = release.group(1)
suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)
return True, suse_facts
def parse_distribution_file_Debian(self, name, data, path, collected_facts):
debian_facts = {}
if 'Debian' in data or 'Raspbian' in data:
debian_facts['distribution'] = 'Debian'
release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
# Last resort: try to find release from tzdata as either lsb is missing or this is very old debian
if collected_facts['distribution_release'] == 'NA' and 'Debian' in data:
dpkg_cmd = self.module.get_bin_path('dpkg')
if dpkg_cmd:
cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
rc, out, err = self.module.run_command(cmd)
if rc == 0:
debian_facts['distribution_release'] = out.strip()
elif 'Ubuntu' in data:
debian_facts['distribution'] = 'Ubuntu'
# nothing else to do, Ubuntu gets correct info from python functions
elif 'SteamOS' in data:
debian_facts['distribution'] = 'SteamOS'
# nothing else to do, SteamOS gets correct info from python functions
elif path in ('/etc/lsb-release', '/etc/os-release') and ('Kali' in data or 'Parrot' in data):
if 'Kali' in data:
# Kali does not provide /etc/lsb-release anymore
debian_facts['distribution'] = 'Kali'
elif 'Parrot' in data:
debian_facts['distribution'] = 'Parrot'
release = re.search('DISTRIB_RELEASE=(.*)', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif 'Devuan' in data:
debian_facts['distribution'] = 'Devuan'
release = re.search(r"PRETTY_NAME=\"?[^(\"]+ \(?([^) \"]+)\)?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1)
elif 'Cumulus' in data:
debian_facts['distribution'] = 'Cumulus Linux'
version = re.search(r"VERSION_ID=(.*)", data)
if version:
major, _minor, _dummy_ver = version.group(1).split(".")
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = major
release = re.search(r'VERSION="(.*)"', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif "Mint" in data:
debian_facts['distribution'] = 'Linux Mint'
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
else:
return False, debian_facts
return True, debian_facts
def parse_distribution_file_Mandriva(self, name, data, path, collected_facts):
mandriva_facts = {}
if 'Mandriva' in data:
mandriva_facts['distribution'] = 'Mandriva'
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
mandriva_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
mandriva_facts['distribution_release'] = release.groups()[0]
mandriva_facts['distribution'] = name
else:
return False, mandriva_facts
return True, mandriva_facts
def parse_distribution_file_NA(self, name, data, path, collected_facts):
na_facts = {}
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution and name == 'NA':
na_facts['distribution'] = distribution.group(1).strip('"')
version = re.search("^VERSION=(.*)", line)
if version and collected_facts['distribution_version'] == 'NA':
na_facts['distribution_version'] = version.group(1).strip('"')
return True, na_facts
def parse_distribution_file_Coreos(self, name, data, path, collected_facts):
coreos_facts = {}
# FIXME: pass in ro copy of facts for this kind of thing
distro = get_distribution()
if distro.lower() == 'coreos':
if not data:
# include fix from #15230, #15228
# TODO: verify this is ok for above bugs
return False, coreos_facts
release = re.search("^GROUP=(.*)", data)
if release:
coreos_facts['distribution_release'] = release.group(1).strip('"')
else:
return False, coreos_facts # TODO: remove if tested without this
return True, coreos_facts
def parse_distribution_file_Flatcar(self, name, data, path, collected_facts):
flatcar_facts = {}
distro = get_distribution()
if distro.lower() == 'flatcar':
if not data:
return False, flatcar_facts
release = re.search("^GROUP=(.*)", data)
if release:
flatcar_facts['distribution_release'] = release.group(1).strip('"')
else:
return False, flatcar_facts
return True, flatcar_facts
def parse_distribution_file_ClearLinux(self, name, data, path, collected_facts):
clear_facts = {}
if "clearlinux" not in name.lower():
return False, clear_facts
pname = re.search('NAME="(.*)"', data)
if pname:
if 'Clear Linux' not in pname.groups()[0]:
return False, clear_facts
clear_facts['distribution'] = pname.groups()[0]
version = re.search('VERSION_ID=(.*)', data)
if version:
clear_facts['distribution_major_version'] = version.groups()[0]
clear_facts['distribution_version'] = version.groups()[0]
release = re.search('ID=(.*)', data)
if release:
clear_facts['distribution_release'] = release.groups()[0]
return True, clear_facts
class Distribution(object):
"""
This subclass of Facts fills the distribution, distribution_version and distribution_release variables
To do so it checks the existence and content of typical files in /etc containing distribution information
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
OSDIST_LIST = (
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'SUSE'},
{'path': '/etc/SuSE-release', 'name': 'SUSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
{'path': '/etc/os-release', 'name': 'NA'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT Linux',
'ClearLinux': 'Clear Linux Software for Intel Architecture',
'SMGL': 'Source Mage GNU/Linux',
}
# keep keys in sync with Conditionals page of docs
OS_FAMILY_MAP = {'RedHat': ['RedHat', 'Fedora', 'CentOS', 'Scientific', 'SLC',
'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
'OEL', 'Amazon', 'Virtuozzo', 'XenServer', 'Alibaba',
'EulerOS', 'openEuler'],
'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
'Pop!_OS', 'Parrot', 'Pardus GNU/Linux'],
'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap'],
'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
'Mandrake': ['Mandrake', 'Mandriva'],
'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
'Slackware': ['Slackware'],
'Altlinux': ['Altlinux'],
                     'SMGL': ['SMGL'],
'Gentoo': ['Gentoo', 'Funtoo'],
'Alpine': ['Alpine'],
'AIX': ['AIX'],
'HP-UX': ['HPUX'],
'Darwin': ['MacOSX'],
'FreeBSD': ['FreeBSD', 'TrueOS'],
'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix'],
'DragonFly': ['DragonflyBSD', 'DragonFlyBSD', 'Gentoo/DragonflyBSD', 'Gentoo/DragonFlyBSD']}
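    # Invert OS_FAMILY_MAP so each individual distribution name maps straight to its
    # family, e.g. OS_FAMILY['Ubuntu'] == 'Debian'.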
OS_FAMILY = {}
for family, names in OS_FAMILY_MAP.items():
for name in names:
OS_FAMILY[name] = family
def __init__(self, module):
self.module = module
def get_distribution_facts(self):
distribution_facts = {}
# The platform module provides information about the running
# system/distribution. Use this as a baseline and fix buggy systems
# afterwards
system = platform.system()
distribution_facts['distribution'] = system
distribution_facts['distribution_release'] = platform.release()
distribution_facts['distribution_version'] = platform.version()
systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')
if system in systems_implemented:
cleanedname = system.replace('-', '')
distfunc = getattr(self, 'get_distribution_' + cleanedname)
dist_func_facts = distfunc()
distribution_facts.update(dist_func_facts)
elif system == 'Linux':
distribution_files = DistributionFiles(module=self.module)
# linux_distribution_facts = LinuxDistribution(module).get_distribution_facts()
dist_file_facts = distribution_files.process_dist_files()
distribution_facts.update(dist_file_facts)
distro = distribution_facts['distribution']
        # look for an OS family alias for the 'distribution'; if there isn't one, use 'distribution'
distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro
return distribution_facts
def get_distribution_AIX(self):
aix_facts = {}
rc, out, err = self.module.run_command("/usr/bin/oslevel")
data = out.split('.')
aix_facts['distribution_major_version'] = data[0]
if len(data) > 1:
aix_facts['distribution_version'] = '%s.%s' % (data[0], data[1])
aix_facts['distribution_release'] = data[1]
else:
aix_facts['distribution_version'] = data[0]
return aix_facts
def get_distribution_HPUX(self):
hpux_facts = {}
rc, out, err = self.module.run_command(r"/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search(r'HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
hpux_facts['distribution_version'] = data.groups()[0]
hpux_facts['distribution_release'] = data.groups()[1]
return hpux_facts
def get_distribution_Darwin(self):
darwin_facts = {}
darwin_facts['distribution'] = 'MacOSX'
rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
if data:
darwin_facts['distribution_major_version'] = data.split('.')[0]
darwin_facts['distribution_version'] = data
return darwin_facts
def get_distribution_FreeBSD(self):
freebsd_facts = {}
freebsd_facts['distribution_release'] = platform.release()
data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', freebsd_facts['distribution_release'])
if 'trueos' in platform.version():
freebsd_facts['distribution'] = 'TrueOS'
if data:
freebsd_facts['distribution_major_version'] = data.group(1)
freebsd_facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
return freebsd_facts
def get_distribution_OpenBSD(self):
openbsd_facts = {}
openbsd_facts['distribution_version'] = platform.release()
rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.match(r'OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
openbsd_facts['distribution_release'] = match.groups()[0]
else:
openbsd_facts['distribution_release'] = 'release'
return openbsd_facts
def get_distribution_DragonFly(self):
dragonfly_facts = {
'distribution_release': platform.release()
}
rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.search(r'v(\d+)\.(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', out)
if match:
dragonfly_facts['distribution_major_version'] = match.group(1)
dragonfly_facts['distribution_version'] = '%s.%s.%s' % match.groups()[:3]
return dragonfly_facts
def get_distribution_NetBSD(self):
netbsd_facts = {}
# FIXME: poking at self.facts, should eventually make these each a collector
platform_release = platform.release()
netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
return netbsd_facts
def get_distribution_SMGL(self):
smgl_facts = {}
smgl_facts['distribution'] = 'Source Mage GNU/Linux'
return smgl_facts
def get_distribution_SunOS(self):
sunos_facts = {}
data = get_file_content('/etc/release').splitlines()[0]
if 'Solaris' in data:
# for solaris 10 uname_r will contain 5.10, for solaris 11 it will have 5.11
uname_r = get_uname(self.module, flags=['-r'])
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ', '')
ora_prefix = 'Oracle '
sunos_facts['distribution'] = data.split()[0]
sunos_facts['distribution_version'] = data.split()[1]
sunos_facts['distribution_release'] = ora_prefix + data
sunos_facts['distribution_major_version'] = uname_r.split('.')[1].rstrip()
return sunos_facts
uname_v = get_uname(self.module, flags=['-v'])
distribution_version = None
if 'SmartOS' in data:
sunos_facts['distribution'] = 'SmartOS'
if _file_exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
sunos_facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
sunos_facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_v is not None and 'NexentaOS_' in uname_v:
sunos_facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if sunos_facts.get('distribution', '') in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
sunos_facts['distribution_release'] = data.strip()
if distribution_version is not None:
sunos_facts['distribution_version'] = distribution_version
elif uname_v is not None:
sunos_facts['distribution_version'] = uname_v.splitlines()[0].strip()
return sunos_facts
return sunos_facts
class DistributionFactCollector(BaseFactCollector):
name = 'distribution'
_fact_ids = set(['distribution_version',
'distribution_release',
'distribution_major_version',
'os_family'])
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
facts_dict = {}
if not module:
return facts_dict
distribution = Distribution(module=module)
distro_facts = distribution.get_distribution_facts()
return distro_facts
| gpl-3.0 | -1,558,401,853,560,547,600 | 43.969697 | 129 | 0.566872 | false |
lsandig/apollon | ag.py | 1 | 6681 | #! /usr/bin/python3
# Command line program to create svg apollonian circles
# Copyright (c) 2014 Ludger Sandig
# This file is part of apollon.
# Apollon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Apollon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Apollon. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
import math
from apollon import ApollonianGasket
from coloring import ColorMap, ColorScheme
def parseArguments(argv, colors):
description = "Generate Apollonian Gaskets and save as svg"
name = argv[0]
colors.append('none')
colors.sort()
parser = argparse.ArgumentParser(description=description, prog=name)
parser.add_argument("-d", "--depth", metavar="D", type=int, default=3, help="Recursion depth, generates 2*3^{D+1} circles. Usually safe for D<=10. For higher D use --force if you know what you are doing.")
parser.add_argument("-o", "--output", metavar="", type=str, default="", help="Output file name. If left blank, default is created from circle curvatures.")
parser.add_argument("-r", "--radii", action="store_true", default=False, help="Interpret c1, c2, c3 as radii and not as curvatures")
parser.add_argument("--color", choices=colors, metavar='SCHEME', default='none', help="Color Scheme. Choose from "+", ".join(colors))
parser.add_argument("--treshold", metavar='T', default=0.005, type=float, help="Don't save circles that are too small. Useful for higher depths to reduce filesize.")
parser.add_argument("--force", action="store_true", default=False, help="Use if you want a higher recursion depth than 10.")
parser.add_argument("c1", type=float, help="Curvature of first circle")
parser.add_argument("c2", type=float, help="Curvature of second circle")
parser.add_argument("c3", type=float, help="Curvature of third circle")
return parser.parse_args()
def colorMsg(color):
print("Available color schemes (name: resmin -- resmax)")
for i in color.info():
print("%s: %d -- %d" % (i["name"], i["low"], i["high"]))
def ag_to_svg(circles, colors, tresh=0.005):
"""
Convert a list of circles to svg, optionally color them.
@param circles: A list of L{Circle}s
@param colors: A L{ColorMap} object
@param tresh: Only circles with a radius greater than the product of tresh and maximal radius are saved
"""
svg = []
# Find the biggest circle, which hopefully is the enclosing one
# and has a negative radius because of this. Note that this does
# not have to be the case if we picked an unlucky set of radii at
# the start. If that was the case, we're screwed now.
big = min(circles, key=lambda c: c.r.real)
# Move biggest circle to front so it gets drawn first
circles.remove(big)
circles.insert(0, big)
if big.r.real < 0:
# Bounding box from biggest circle, lower left corner and two
# times the radius as width
corner = big.m - ( abs(big.r) + abs(big.r) * 1j )
vbwidth = abs(big.r)*2
width = 500 # Hardcoded!
# Line width independent of circle size
lw = (vbwidth/width)
svg.append('<svg xmlns="http://www.w3.org/2000/svg" width="%f" height="%f" viewBox="%f %f %f %f">\n' % (width, width, corner.real, corner.imag, vbwidth, vbwidth))
# Keep stroke width relative
svg.append('<g stroke-width="%f">\n' % lw)
# Iterate through circle list, circles with radius<radmin
# will not be saved because they are too small for printing.
radmin = tresh * abs(big.r)
for c in circles:
if abs(c.r) > radmin:
fill = colors.color_for(abs(c.r))
svg.append(( '<circle cx="%f" cy="%f" r="%f" fill="%s" stroke="black"/>\n' % (c.m.real, c.m.imag, abs(c.r), fill)))
svg.append('</g>\n')
svg.append('</svg>\n')
return ''.join(svg)
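# Sketch of the typical call (mirrors main() below): given a generated
# ApollonianGasket `ag` and a ColorMap `mp`,
#   svg = ag_to_svg(ag.genCircles, mp, tresh=0.005)
# returns the SVG markup as a string; writing it to a file is left to the caller.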
def impossible_combination(c1, c2, c3):
# If any curvatures x, y, z satisfy the equation
# x = 2*sqrt(y*z) + y + z
    # then no fourth enclosing circle can be generated, because it
# would be a line.
# We need to see for c1, c2, c3 if they could be "x".
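    # Worked example (numbers chosen for illustration): with y = z = 1 we get
    # x = 2*sqrt(1*1) + 1 + 1 = 4, so curvatures (4, 1, 1) admit no enclosing
    # fourth circle; by Descartes' theorem its curvature would be 0, i.e. a line.
    # Note that the comparison below uses exact float equality, so only exactly
    # degenerate inputs are caught.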
impossible = False
sets = [(c1,c2,c3), (c2,c3,c1), (c3,c1,c2)]
for (x, y, z) in sets:
if x == 2*math.sqrt(y*z) + y + z:
impossible = True
return impossible
def main():
color = ColorScheme("colorbrewer.json")
available = [d['name'] for d in color.info()]
args = parseArguments(sys.argv, available)
# Sanity checks
for c in [args.c1, args.c2, args.c3]:
if c == 0:
print("Error: curvature or radius can't be 0")
exit(1)
if impossible_combination(args.c1, args.c2, args.c3):
print("Error: no apollonian gasket possible for these curvatures")
exit(1)
# Given curvatures were in fact radii, so take the reciprocal
if args.radii:
args.c1 = 1/args.c1
args.c2 = 1/args.c2
args.c3 = 1/args.c3
ag = ApollonianGasket(args.c1, args.c2, args.c3)
# At a recursion depth > 10 things start to get serious.
if args.depth > 10:
if not args.force:
print("Note: Number of cicles increases exponentially with 2*3^{D+1} at depth D.\nIf you want to use D>10, specify the --force option.")
args.depth = 10
ag.generate(args.depth)
# Get smallest and biggest radius
smallest = abs(min(ag.genCircles, key=lambda c: abs(c.r.real)).r.real)
biggest = abs(max(ag.genCircles, key=lambda c: abs(c.r.real)).r.real)
# Construct color map
if args.color == 'none':
mp = ColorMap('none')
else:
# TODO: resolution of 8 is hardcoded, some color schemes have
# resolutions up to 11. Make this configurable.
mp = color.makeMap(smallest, biggest, args.color, 8)
svg = ag_to_svg(ag.genCircles, mp, tresh=args.treshold)
# User supplied filename? If not, we need to construct something.
if len(args.output) == 0:
args.output = 'ag_%.4f_%.4f_%.4f.svg' % (args.c1, args.c2, args.c3)
with open(args.output, 'w') as f:
f.write(svg)
f.close()
if( __name__ == "__main__" ):
main()
| gpl-3.0 | 5,160,250,914,175,473,000 | 36.960227 | 209 | 0.640024 | false |
mmerce/python | bigml/tests/test_15_download.py | 1 | 4310 | # -*- coding: utf-8 -*-
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Downloading dataset
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
from . import create_model_steps as model_create
class TestDownload(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully exporting a dataset:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I download the dataset file to "<local_file>"
Then file "<local_file>" is like file "<data>"
Examples:
| data | time_1 | time_2 | local_file |
| ../data/iris.csv | 30 | 30 | ./tmp/exported_iris.csv |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '30', '30', 'tmp/exported_iris.csv']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
dataset_create.i_export_a_dataset(self, example[3])
dataset_create.files_equal(self, example[3], example[0])
def test_scenario2(self):
"""
Scenario: Successfully creating a model and exporting it:
Given I create a data source uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a model
And I wait until the model is ready less than <time_3> secs
And I export the <"pmml"> model to file "<expected_file>"
Then I check the model is stored in "<expected_file>" file in <"pmml">
Examples:
| data | time_1 | time_2 | time_3 | expected_file | pmml
                | data/iris.csv | 30 | 30 | 30 | tmp/model/iris.json | false
                | data/iris_sp_chars.csv | 30 | 30 | 30 | tmp/model/iris_sp_chars.pmml | true
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris.csv', '30', '30', '30', 'tmp/model/iris.json', False],
['data/iris_sp_chars.csv', '30', '30', '30', 'tmp/model/iris_sp_chars.pmml', True]]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file(self, example[0])
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
model_create.i_create_a_model(self)
model_create.the_model_is_finished_in_less_than(self, example[3])
model_create.i_export_model(self, example[5], example[4])
model_create.i_check_model_stored(self, example[4], example[5])
| apache-2.0 | 2,562,945,829,367,800,000 | 42.535354 | 108 | 0.572622 | false |