# =============================================================================
# repo: TheWiseLion/pykhet | path: tests/game_tests.py | license: MIT
# =============================================================================
import unittest
from pykhet.components.types import MoveType, Move, TeamColor, Orientation
from pykhet.components.types import Position
from pykhet.games.game_types import ClassicGame
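
# Run with e.g. `python -m unittest tests.game_tests` (assuming pykhet is
# installed or on PYTHONPATH); each test gets a fresh ClassicGame via setUp().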
class TestClassicGames(unittest.TestCase):
def setUp(self):
self.game = ClassicGame()
def tearDown(self):
self.game = None
def test_available_moves_classic(self):
sphinx_moves_silver = self.game.get(0, 0).get_moves(self.game)
sphinx_moves_red = self.game.get(9, 7).get_moves(self.game)
# Sphinx Only Has 1 Move
self.assertEquals(len(sphinx_moves_silver), 1)
self.assertEquals(len(sphinx_moves_silver), len(sphinx_moves_red))
pharaoh_moves_silver = self.game.get(5, 0).get_moves(self.game)
pharaoh_moves_red = self.game.get(4, 7).get_moves(self.game)
# three moves, zero rotations
self.assertEquals(len(pharaoh_moves_red), 3)
self.assertEquals(len(pharaoh_moves_red), len(pharaoh_moves_silver))
# Test Anubises
anubis_moves_silver = self.game.get(4, 0).get_moves(self.game)
anubis_moves_red = self.game.get(5, 7).get_moves(self.game)
# four move, two rotations
self.assertEquals(len(anubis_moves_red), 6)
self.assertEquals(len(anubis_moves_red), len(anubis_moves_silver))
anubis_moves_silver = self.game.get(6, 0).get_moves(self.game)
anubis_moves_red = self.game.get(3, 7).get_moves(self.game)
# three moves, two rotations
self.assertEquals(len(anubis_moves_red), 5)
self.assertEquals(len(anubis_moves_red), len(anubis_moves_silver))
# Test Scarabs
scarab1_moves_silver = self.game.get(4, 3).get_moves(self.game)
scarab1_moves_red = self.game.get(5, 4).get_moves(self.game)
# 4 moves, 1 swap, 2 rotations
self.assertEquals(len(scarab1_moves_silver), 7)
self.assertEquals(len(scarab1_moves_red), len(scarab1_moves_silver))
scarab2_moves_silver = self.game.get(5, 3).get_moves(self.game)
scarab2_moves_red = self.game.get(4, 4).get_moves(self.game)
# 5 moves, 2 rotations
self.assertEquals(len(scarab2_moves_silver), 7)
self.assertEquals(len(scarab2_moves_red), len(scarab2_moves_silver))
# Test Pyramids:
p1_silver = self.game.get(2, 1).get_moves(self.game)
p1_red = self.game.get(7, 6).get_moves(self.game)
# 6 moves, 2 rotations
self.assertEquals(len(p1_silver), 8)
self.assertEquals(len(p1_red), len(p1_silver))
p2_silver = self.game.get(6, 5).get_moves(self.game)
p2_red = self.game.get(3, 2).get_moves(self.game)
# 5 moves, 2 rotations
self.assertEquals(len(p2_red), 7)
self.assertEquals(len(p2_red), len(p2_silver))
p3_silver = self.game.get(0, 3).get_moves(self.game)
p3_red = self.game.get(9, 3).get_moves(self.game)
# 4 moves, 2 rotations
self.assertEquals(len(p3_red), 6)
self.assertEquals(len(p3_red), len(p3_silver))
p3_silver = self.game.get(0, 4).get_moves(self.game)
p3_red = self.game.get(9, 4).get_moves(self.game)
# 4 moves, 2 rotations
self.assertEquals(len(p3_red), 6)
self.assertEquals(len(p3_red), len(p3_silver))
p4_silver = self.game.get(2, 3).get_moves(self.game)
p4_red = self.game.get(7, 4).get_moves(self.game)
# 6 moves, 2 rotations
self.assertEquals(len(p4_red), 8)
self.assertEquals(len(p4_red), len(p4_silver))
p5_silver = self.game.get(7, 0).get_moves(self.game)
p5_red = self.game.get(2, 7).get_moves(self.game)
# 4 moves, 2 rotations
self.assertEquals(len(p5_silver), 6)
self.assertEquals(len(p5_red), len(p5_silver))
def test_destroy_pieces_classic(self):
self.game.apply_move(Move(MoveType.move, Position(2, 1), Position(2, 0)))
self.game.apply_laser(TeamColor.silver)
self.game.apply_move(Move(MoveType.move, Position(7, 6), Position(7, 7)))
self.game.apply_laser(TeamColor.red)
self.game.apply_move(Move(MoveType.rotate, Position(0, 0), Orientation.right))
self.game.apply_laser(TeamColor.silver)
self.assertEquals(len(self.game.squares_with_pieces_of_color(TeamColor.silver)),
len(self.game.squares_with_pieces_of_color(TeamColor.red)) + 1)
self.game.apply_move(Move(MoveType.rotate, Position(9, 7), Orientation.left))
self.game.apply_laser(TeamColor.red)
self.assertEquals(len(self.game.squares_with_pieces_of_color(TeamColor.silver)),
len(self.game.squares_with_pieces_of_color(TeamColor.red)))
def test_red_wins_classic(self):
self.game.apply_move(Move(MoveType.move, Position(0, 3), Position(0, 2)))
self.game.apply_move(Move(MoveType.move, Position(3, 2), Position(5, 2)))
self.game.apply_laser(TeamColor.silver)
self.assertEquals(self.game.winner, TeamColor.red)
def simple_silver_win(self):
pass
def test_same_number_moves(self):
red_moves = self.game.get_available_moves(TeamColor.red)
silver_moves = self.game.get_available_moves(TeamColor.silver)
self.assertEquals(len(red_moves), len(silver_moves))

# =============================================================================
# repo: takeshineshiro/heat | path: heat/common/timeutils.py | license: Apache-2.0
# =============================================================================
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities for handling ISO 8601 duration format.
"""
import datetime
import random
import re
import time
from heat.common.i18n import _
iso_duration_re = re.compile(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')
wallclock = time.time
class Duration(object):
'''
Note that we don't attempt to handle leap seconds or large clock
jumps here. The latter are assumed to be rare and the former
negligible in the context of the timeout. Time zone adjustments,
Daylight Savings and the like *are* handled. PEP 418 adds a proper
monotonic clock, but only in Python 3.3.
'''
def __init__(self, timeout=0):
self._endtime = wallclock() + timeout
def expired(self):
return wallclock() > self._endtime
def endtime(self):
return self._endtime
def parse_isoduration(duration):
"""
    Convert a duration in ISO 8601 format to seconds.
Year, Month, Week, and Day designators are not supported.
Example: 'PT12H30M5S'
"""
result = iso_duration_re.match(duration)
if not result:
raise ValueError(_('Only ISO 8601 duration format of the form '
'PT#H#M#S is supported.'))
t = 0
t += (3600 * int(result.group(1))) if result.group(1) else 0
t += (60 * int(result.group(2))) if result.group(2) else 0
t += int(result.group(3)) if result.group(3) else 0
return t
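
# A minimal usage sketch (values are illustrative): 'PT12H30M5S' parses to
# 12*3600 + 30*60 + 5 = 45005 seconds, while strings with date designators
# such as 'P1DT12H' do not match the PT#H#M#S pattern above and raise
# ValueError.
#
#   parse_isoduration('PT12H30M5S')  # -> 45005
#   parse_isoduration('PT5S')        # -> 5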
def retry_backoff_delay(attempt, scale_factor=1.0, jitter_max=0.0):
"""
Calculate an exponential backoff delay with jitter.
    Delay is calculated as
    (2^attempt * scale_factor) + (uniform random from [0,1) * jitter_max)
:param attempt: The count of the current retry attempt
:param scale_factor: Multiplier to scale the exponential delay by
:param jitter_max: Maximum of random seconds to add to the delay
    :returns: Number of seconds to wait before the next retry attempt
"""
exp = float(2 ** attempt) * float(scale_factor)
if jitter_max == 0.0:
return exp
return exp + random.random() * jitter_max
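
# Worked example (assumed arguments): with scale_factor=1.0 and jitter_max=2.0
# the delay grows exponentially, with up to 2 extra seconds of jitter:
#   retry_backoff_delay(0, 1.0, 2.0)  # -> 1.0  + U[0, 2)
#   retry_backoff_delay(3, 1.0, 2.0)  # -> 8.0  + U[0, 2)
#   retry_backoff_delay(5, 0.5, 2.0)  # -> 16.0 + U[0, 2)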
def round_to_seconds(dt):
"""Round a datetime to the nearest second."""
rounding = 0
if dt.microsecond >= 500000:
rounding = 1
return dt + datetime.timedelta(0, rounding,
-dt.microsecond)
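
# Rounding behaviour (illustrative): microsecond values of 500000 and above
# round up to the next second, everything below truncates:
#   round_to_seconds(datetime.datetime(2015, 1, 1, 0, 0, 0, 499999))
#   # -> datetime.datetime(2015, 1, 1, 0, 0, 0)
#   round_to_seconds(datetime.datetime(2015, 1, 1, 0, 0, 0, 500000))
#   # -> datetime.datetime(2015, 1, 1, 0, 0, 1)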

# =============================================================================
# repo: DogDogGoose/ptab | path: ptab/core.py | license: LGPL-3.0
# =============================================================================
import sys
import requests
import requests_cache
import string
import unicodedata
import json
import shutil
import os
import re
import pprint
import pathvalidate
import ptab.cgi
#
# PTAB API
#
baseURL = 'https://developer.uspto.gov/ptab-api'
docsURL = 'https://developer.uspto.gov/ptab-api/documents'
trialsURL = 'https://developer.uspto.gov/ptab-api/proceedings'
dateURL = 'https://developer.uspto.gov/ptab-api/proceedings'
postfixdocs = '/documents'
postfixdoczip = '/documents.zip'
# ptabcert = "~/scripts/ptab/ptab.pem"
PTAB_MAX_RESULTS = 25
###########################
class ptabgrab(object):
"""
utilizes the ptab rest api
"""
def __new__(cls, verbose=False):
newobj = object.__new__(cls)
newobj.verbose = verbose
return newobj
def __init__(self, verbose=False):
self.verbose = verbose
self.verify = False
self.outdir = ''
self.download = True
self.dumpJson = False
requests_cache.install_cache(cache_name='ptab_cache', backend='sqlite', expire_after=36000) # expire after 10 hours
requests.packages.urllib3.disable_warnings()
def __str__(self):
return "%s documents found." % self.getNumDocs()
def getOutputDir(self):
return self.outdir
def setOutputDir(self, odir):
newodir = os.path.join(odir, '')
if not os.path.isdir(newodir):
os.makedirs(newodir)
self.outdir = newodir
return
# TODO
def setCertificate(certpath):
# check path
# verify cert
self.verify = True
# TODO
def getNumDocs():
return 0
def cleanUnicodeString(self, filename, filterForPath):
filteredUnicode = filename.replace('/', '-').replace('"', '').replace("'", "") if (filterForPath) else filename
filteredAscii = unicodedata.normalize('NFKC', filteredUnicode).encode('ascii', 'ignore')
return pathvalidate.sanitize_filename(filteredAscii.decode('utf-8'))
#
# curlFile
# used to save files
#
def curlFile(self, fileurl, filename):
# filesystype = sys.getfilesystemencoding() # should generally be 'utf-8'
cleanfilename = self.outdir + self.cleanUnicodeString(filename, 1)
if self.verbose:
print ("\tDownloading (%s)" % cleanfilename)
if os.path.exists(cleanfilename):
print ("\tSKIPPING: %s already exists!" % cleanfilename)
return 0
if self.download:
myheaders = {'Accept-Encoding': 'deflate'}
r = requests.get(fileurl, stream=True, verify=self.verify, headers=myheaders)
if r.status_code == 200:
with open(cleanfilename, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
else:
print ("Downloads Disabled: URL <%s>" % fileurl )
return 1
    def getDocumentListURL(self, dktnum):
        return self.buildTrialsUrl(dktnum)
#
# Alternative way to search dockets. This sometimes works better,
# though the PTAB interface for both trials and documents can be flaky.
#
# default to IPR over CBM
# docketstr = dktnum if re.search(r'(IPR|CBM)20\d{2}-\d{5}', dktnum) else ("IPR" + dktnum)
# cgimaker = cgi.builder()
# if not cgimaker.addArgument('trialNumber', docketstr):
# print ("\tERROR: improper docket %s" % docketstr)
# return ''
# return docsURL + cgimaker.getCGIStr()
def buildTrialsUrl(self, dktnum, startingRecordNumber = -1):
dktnum = dktnum.upper()
docketstr = ''
if re.search(r'(IPR|CBM)20\d{2}-\d{5}', dktnum):
docketstr = dktnum
else:
# default to IPR over CBM
docketstr = "IPR" + dktnum
testbuilder = ptab.cgi.builder()
testbuilder.addArgument("sortOrder")
if testbuilder.addArgument("proceedingNumber", docketstr):
if self.verbose:
print ("Added (%s : %s)." % ('proceedingNumber', docketstr))
else:
            print ("ERROR: FAILED TO ADD (%s : %s)." % ('proceedingNumber', docketstr))
# TODO: add iterative paging functionality using offset to page through results
if startingRecordNumber >= 0:
if self.verbose:
print ("Iterating starting with record number (%i)" % startingRecordNumber)
testbuilder.addArgument('recordStart', startingRecordNumber)
testbuilder.addArgument('recordQuantity', PTAB_MAX_RESULTS)
targetUrl = docsURL + testbuilder.getCGIStr()
return targetUrl
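
    # Illustrative only (assumes ptab.cgi.builder emits ordinary query
    # strings): buildTrialsUrl('2016-00001') yields a URL along the lines of
    #   .../documents?sortOrder=...&proceedingNumber=IPR2016-00001
    # and, when paging, recordStart / recordQuantity parameters are appended.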
#
#
def buildDocsUrl(self, filterarguments):
testbuilder = ptab.cgi.builder()
for key, val in filterarguments.iteritems():
if testbuilder.addArgument(key, val):
if self.verbose: print ("Added (%s : %s)." % (key, val))
else:
print ("ERROR: FAILED TO ADD (%s : %s)." % (key, val) )
return docsURL + testbuilder.getCGIStr()
#
#
def curlJson(self, targetUrl):
if self.verbose:
print ("Getting <%s>" % targetUrl)
result = 0
if self.verify:
# TODO
# dktmeta = requests.get(targetUrl, cert=ptabcert)
result = 0
else:
try:
result = requests.get(targetUrl, verify=self.verify)
print ("Curl got result (%s)" % result)
except ValueError:
print ("ERROR: Could Not access URL.")
return result
def parseJsonList(self, jsonstr):
parsedjson = json.loads(jsonstr)
if (self.dumpJson):
json_dump_text = jsonstr.encode('ascii', 'ignore')
text_file = open("JSONdump.txt", "a")
text_file.write(json_dump_text.decode('utf-8'))
text_file.close()
return parsedjson
#
# Downloads all links in the json list
#
def downloadJsonLinks(self, jsonstr):
jsondata = self.parseJsonList(jsonstr)
resultsList = jsondata.get('results')
for document in resultsList:
# print ("Number, title: (%s, %s)" % (document['documentNumber'], document['title']))
fname_raw = document.get('documentNumber').zfill(4) + " - " + document.get('documentTitleText')
fname = fname_raw.replace('.', '') + ".pdf"
if self.verbose:
print ("Processing (%s)" % self.cleanUnicodeString(fname, 0))
docID = document.get('documentIdentifier')
downloadLink = baseURL + '/documents/{documentIdentifier}/download'.replace('{documentIdentifier}', docID)
if self.verbose:
print ("\tURL <%s>" % downloadLink)
self.curlFile(downloadLink, fname)
retval = dict()
retval['downloadedRecords'] = len (resultsList)
retval['totalRecords'] = jsondata.get('recordTotalQuantity')
return retval
#
# Downloads all documents in the docket
#
def getDocsInDocket(self, dktnum):
allDocsDownloaded = False
currentRecord = 0
while not allDocsDownloaded:
ptabJsonList = self.curlJson( self.buildTrialsUrl(dktnum, currentRecord) )
if ptabJsonList:
status = self.downloadJsonLinks(ptabJsonList.text)
numDocs = status['downloadedRecords']
totalDocs = status['totalRecords']
if self.verbose:
print ("Found %s documents." % numDocs)
print ("Found %s total docs in docket." % totalDocs)
if currentRecord >= totalDocs:
allDocsDownloaded = True
else:
currentRecord += PTAB_MAX_RESULTS
next
else:
print ("ERROR: Could not read URL")
next
#
# Get a specific paper in a docket
def getPaper(self, dktnum, papernum):
if self.verbose:
print ("Getting Paper (%s) from Docket (%s)" % (papernum, dktnum))
ptabJsonList = self.curlJson( self.getDocumentListURL(dktnum) )
if ptabJsonList:
rawresults = self.parseJsonList(ptabJsonList.text)
docketDocsList = rawresults.get('results')
ptabObj = self.findPaper(papernum, docketDocsList)
if (ptabObj):
downloadUrl = self.getLink(ptabObj)
if self.verbose:
print ("\tURL <%s>" % downloadUrl)
fname_raw = ptabObj.get('documentNumber') + " - " + ptabObj.get('title')
fname = fname_raw.replace('.', '') + ".pdf"
return self.curlFile(downloadUrl, fname)
else:
print ("ERROR: Could not find paper (%s)" % papernum)
return 0
else:
print ("ERROR: getPaper() could not read URL for docket")
return
def findPaper(self, paperNumber, docketList):
if (len(docketList) > 1):
hitList = list(filter(lambda xlist: xlist.get('documentNumber') == paperNumber, docketList))
if (len(hitList) == 1):
                return hitList[0]
            elif (len(hitList) > 1):
                print ("Warning: findPaper() found more than 1 paper number (%s); using first result" % paperNumber)
                return hitList[0]
else:
print ("ERROR: findPaper() could not find paper number (%s)" % paperNumber)
return 0
else:
return 0
def getLink(self, ptabResultObj):
for link in ptabResultObj.get('links'):
if link['rel'] == 'download':
# account for an error in formatting that sometimes appears in the ptab json feed
return re.sub(r'ptab-api[\\/]+ptab-api', 'ptab-api', link['href'])
# if program falls out of the for loop
return 0
#
# In development...
#
def searchDocuments(self, filterarguments):
targetUrl = self.buildDocsUrl(filterarguments)
if self.verbose:
print ("Using search string:" + "\t" + targetUrl)
results = self.curlJson(targetUrl)
if results:
numDocs = self.downloadJsonLinks(results.text)
else:
print ("ERROR: Could not read URL")
return
if self.verbose:
print ("Found %s documents." % numDocs)
return
#
# Main access point. Gets all dockets with a certain party as petitioner or po
#
def getDocketsByParty(self, partyname, earliestfilingdate):
dockets = self.locateDocketsByParty(partyname, earliestfilingdate)
baseOutDir = self.getOutputDir()
for dock in dockets:
(petitioner, patentowner, dkt, status) = dock[0:4]
print ("* {0} v {1} ({2}) - {3}".format(petitioner, patentowner, dkt, status))
docketOutDir = self.makeSafeFilename("{0}-{1} v {2}".format(dkt, petitioner, patentowner))
cleanDocketOutDir = docketOutDir.encode('ascii', 'ignore')
newdir = os.path.join(baseOutDir, cleanDocketOutDir)
self.setOutputDir(newdir)
self.getDocsInDocket(dkt)
#
# Locates all dockets with a certain party as petitioner or po
# Recursive
#
def locateDocketsByParty(self, partyname, earliestfilingdate, offset=0):
searchDateUrl = self.buildDateUrl(earliestfilingdate, offset)
if self.verbose:
print ("Using search string:" + "\t" + searchDateUrl)
results = self.curlJson(searchDateUrl)
if results:
dockets = []
(totalCount, nextOffset) = self.filterJsonResultsByParty(results.text, partyname, dockets)
# This is how we know we have more to do
if (totalCount >= nextOffset):
newDockets = self.locateDocketsByParty(partyname, earliestfilingdate, offset=nextOffset)
if (len(newDockets) > 0):
dockets += newDockets
return dockets
else:
print ("ERROR: Could not read URL")
return []
#
# Makes a date query for ptab API
#
def buildDateUrl(self, earliestfilingdate, offset=0):
if not earliestfilingdate:
earliestfilingdate = "2012-09-16" # enactment date
searchObj = re.search( r'^\d{4}-\d{2}-\d{2}$', earliestfilingdate, re.I)
if searchObj:
print ("Using earliest filing date of %s" % earliestfilingdate)
else:
searchObj = re.search( r'^\d{4}$', earliestfilingdate, re.I)
if searchObj:
year = earliestfilingdate
earliestfilingdate = year + "-01-01"
print ("Year %s provided; using earliest filing date of %s" % (year, earliestfilingdate))
else:
print ("ERROR: Invalid starting date (%s)" % earliestfilingdate)
querybuilder = ptab.cgi.builder()
querybuilder.addArgument('filingDateFrom', earliestfilingdate)
querybuilder.addArgument('limit')
querybuilder.addArgument('offset', offset)
return dateURL + querybuilder.getCGIStr()
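
    # Accepted inputs (hypothetical examples): buildDateUrl('2014-06-01')
    # filters from that exact date, buildDateUrl('2014') expands to
    # '2014-01-01', and a falsy date falls back to 2012-09-16, the enactment
    # date.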
#
# takes a JSON list from the all trials query, filters by party name
#
def filterJsonResultsByParty(self, jsonstr, partyname, dockets):
parsedjson = json.loads(jsonstr)
results = parsedjson.get('results')
numResults = len(results)
if (self.dumpJson):
json_dump_text = jsonstr.encode('ascii', 'ignore')
text_file = open("JSONdump.txt", "a")
text_file.write(json_dump_text.decode('utf-8'))
text_file.close()
if (results is None):
return []
filteredlist = filter(lambda x: re.search(r'' + re.escape(partyname), x.get('petitionerPartyName', '') + x.get('patentOwnerName', ''), re.IGNORECASE), results)
# consider changing this loop to a map function
for proceeding in filteredlist:
petitionerNameRaw = proceeding.get('petitionerPartyName', '[OMITTED]')
poNameRaw = proceeding.get('patentOwnerName', '[OMITTED]')
trialNumber = proceeding.get('proceedingNumber')
status = proceeding.get('prosecutionStatus')
dockets.append([petitionerNameRaw, poNameRaw, trialNumber, status])
# Now figure out how many items are left
meta = parsedjson.get('metadata')
currentLimit = meta.get("limit")
currentOffset = meta.get("offset")
currentCount = meta.get("count")
print ("Limit (%s), Offset (%s), Count (%s)" % (currentLimit, currentOffset, currentCount))
return (currentCount, currentOffset + currentLimit)
#
# Make a string safe
#
def makeSafeFilename(self, inputFilename):
try:
safechars = string.letters + string.digits + " -_."
return filter(lambda c: c in safechars, inputFilename)
        except:
            return ""

# =============================================================================
# repo: umich-brcf-bioinf/Connor | path: test/command_validator_test.py | license: Apache-2.0
# =============================================================================
#pylint: disable=invalid-name, too-few-public-methods, too-many-public-methods
#pylint: disable=protected-access, missing-docstring, too-many-locals
#pylint: disable=too-many-arguments
#pylint: disable=deprecated-method
from __future__ import print_function, absolute_import, division
from argparse import Namespace
import os
from test.utils_test import BaseConnorTestCase
from testfixtures.tempdirectory import TempDirectory
import connor.command_validator as validator
import connor.utils as utils
from connor.utils import UsageError
class MockTask(object):
def __init__(self, error_message=None):
self.error_message = error_message
self.args = None
self.execute_called = False
self.log = None
def execute(self, args, log):
self.execute_called = True
if self.error_message:
raise Exception(self.error_message)
else:
self.args = args
self.log = log
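
# MockTask stands in for validator callables in the tests below: it records
# the args/log it was invoked with so tests can assert the wiring, and raises
# on demand to exercise error paths.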
class CommandValidatorTest(BaseConnorTestCase):
def test_Validations(self):
function_names = [f.__name__ for f in validator._VALIDATIONS]
self.assertEqual(['_check_input_bam_exists',
'_check_input_bam_valid',
'_check_input_bam_indexed',
'_check_input_bam_not_deduped',
'_check_input_bam_not_empty',
'_check_input_bam_no_secondary',
'_check_input_bam_paired',
'_check_input_bam_properly_paired',
'_check_input_bam_consistent_length',
'_check_overwrite_output'],
function_names)
def test_preflight_runsAllValidations(self):
task1 = MockTask()
task2 = MockTask()
validator._VALIDATIONS = [task1.execute,
task2.execute]
args = Namespace()
log = self.mock_logger
validator.preflight(args, log)
self.assertTrue(task1.execute_called)
self.assertEqual(task1.args, args)
self.assertEqual(task1.log, log)
self.assertTrue(task2.execute_called)
self.assertEqual(task2.args, args)
self.assertEqual(task2.log, log)
def test_check_input_bam_exists_ok(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('input.bam', b'foo')
input_bam_path = os.path.join(tmp_dir.path, 'input.bam')
args = Namespace(input_bam=input_bam_path)
validator._check_input_bam_exists(args)
self.ok()
def test_check_input_bam_exists_raisesUsageError(self):
with TempDirectory() as tmp_dir:
input_bam_path = os.path.join(tmp_dir.path, 'input.bam')
args = Namespace(input_bam=input_bam_path)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*input.bam\] does not exist',
validator._check_input_bam_exists,
args)
def test_check_input_bam_valid_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
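        # SAM is tab-delimited; fixtures use '|' for readability and swap in
        # real tabs via replace("|", "\t") before writing the BAM.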
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=False)
args = Namespace(input_bam=input_bam_path)
validator._check_input_bam_valid(args)
self.ok()
def test_check_input_bam_valid_raisesUsageError(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('input.bam', b'foo')
input_bam_path = os.path.join(tmp_dir.path, 'input.bam')
args = Namespace(input_bam=input_bam_path)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*input.bam\] not a valid BAM',
validator._check_input_bam_valid,
args)
def test_check_input_bam_indexed_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path)
validator._check_input_bam_indexed(args)
self.ok()
def test_check_input_bam_indexed_raisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=False)
args = Namespace(input_bam=input_bam_path)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*input.bam\] is not indexed',
validator._check_input_bam_indexed,
args)
def test_check_input_bam_not_deduped_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:foo|PN:bwa
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_not_deduped(args, self.mock_logger)
self.ok()
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_not_deduped_raisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:foo|PN:connor
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = (r'\[.*input.bam\] has already been processed with Connor'
r'.*Are you sure.*force')
self.assertRaisesRegexp(utils.UsageError,
regex,
validator._check_input_bam_not_deduped,
args)
def test_check_input_bam_not_deduped_noPgHeader(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
validator._check_input_bam_not_deduped(args)
self.ok()
def test_check_input_bam_not_deduped_noPnHeader(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:bwa|VN:1.3
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
validator._check_input_bam_not_deduped(args)
self.ok()
def test_check_input_bam_not_deduped_warnIfForced(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
@PG|ID:foo|PN:connor
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_not_deduped(args,
log=self.mock_logger)
warnings = self.mock_logger._log_calls['WARNING']
self.assertEqual(1, len(warnings))
regex = (r'\[.*input.bam\] has already been processed with Connor'
r'.*forcing')
self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_not_empty_raiseUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*input.bam\] is empty',
validator._check_input_bam_not_empty,
args)
def test_check_input_bam_not_empty_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
validator._check_input_bam_not_empty(args)
self.ok()
def test_check_input_bam_no_secondary_raisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(flag='256').replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = r'\[.*input.bam\] contains secondary alignments\..*'
self.assertRaisesRegexp(utils.UsageError,
regex,
validator._check_input_bam_no_secondary,
args)
def test_check_input_bam_no_secondary_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(flag='1')
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_no_secondary(args, self.mock_logger)
self.ok()
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_no_secondary_warnIfForced(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(flag='256').replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_no_secondary(args, self.mock_logger)
warnings = self.mock_logger._log_calls['WARNING']
self.assertEqual(1, len(warnings))
regex = r'\[.*input.bam\] contains secondary alignments\..*forcing'
self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_paired_raisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(flag='16').replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = r'\[.*input.bam\] does not appear to contain paired reads'
self.assertRaisesRegexp(utils.UsageError,
regex,
validator._check_input_bam_paired,
args)
def test_check_input_bam_paired_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{unpaired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|{paired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(unpaired_flag='16', paired_flag='99')
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_paired(args, self.mock_logger)
self.ok()
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_paired_warnIfForced(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(flag='16').replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_paired(args, self.mock_logger)
warnings = self.mock_logger._log_calls['WARNING']
self.assertEqual(1, len(warnings))
regex = (r'\[.*input.bam\] does not appear to contain paired '
r'reads.*forcing')
self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_properly_paired_raisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(flag='1').replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = r'\[.*input.bam\] does not appear to contain any properly paired alignments'
self.assertRaisesRegexp(utils.UsageError,
regex,
validator._check_input_bam_properly_paired,
args)
def test_check_input_bam_properly_paired_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{unpaired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|{paired_flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(unpaired_flag='2', paired_flag='99')
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_properly_paired(args, self.mock_logger)
self.ok()
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_input_bam_properly_paired_warnIfForced(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|{flag}|chr10|100|20|5M|=|300|200|AAAAA|>>>>>'''
sam_contents = sam_contents.format(flag='1').replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_properly_paired(args, self.mock_logger)
warnings = self.mock_logger._log_calls['WARNING']
self.assertEqual(1, len(warnings))
regex = (r'\[.*input.bam\] does not appear to contain any properly paired '
r'alignments.*forcing')
self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_barcoded_ok(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_barcoded(args, self.mock_logger)
self.ok()
self.assertEquals(0, len(self.mock_logger._log_calls))
def test_check_input_bam_barcoded_okAtThreshold(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
validator._check_input_bam_barcoded(args, self.mock_logger)
self.ok()
self.assertEquals(0, len(self.mock_logger._log_calls))
def test_check_input_bam_barcoded_leftUnbarcodedRaisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = r'\[.*input.bam\] reads do not appear to have barcodes'
self.assertRaisesRegexp(utils.UsageError,
regex,
validator._check_input_bam_barcoded,
args)
def test_check_input_bam_barcoded_rightUnbarcodedRaisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = r'\[.*input.bam\] reads do not appear to have barcodes'
self.assertRaisesRegexp(utils.UsageError,
regex,
validator._check_input_bam_barcoded,
args)
def test_check_input_bam_barcoded_warnIfForced(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA1|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA2|99|chr10|100|20|8M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA2|147|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>>>>
readNameA3|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA3|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA4|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA4|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
readNameA5|99|chr10|100|20|3S5M|=|300|200|NNNAAAAA|>>>>>>>>
readNameA5|147|chr10|100|20|5M3S|=|300|200|AAAAANNN|>>>>>>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_barcoded(args, self.mock_logger)
warnings = self.mock_logger._log_calls['WARNING']
self.assertEqual(1, len(warnings))
regex = r'\[.*input.bam\] reads do not appear to have barcodes.*forcing'
self.assertRegexpMatches(warnings[0], regex)
def test_check_input_bam_consistent_length_okAtThreshold(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_consistent_length(args, self.mock_logger)
self.ok()
self.assertEquals(0, len(self.mock_logger._log_calls))
def test_check_input_bam_consistent_length_posRaisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>!!!
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = (r'\[.*input.bam\] reads appear to have inconsistent '
r'sequence lengths\..*force')
self.assertRaisesRegexp(UsageError,
regex,
validator._check_input_bam_consistent_length,
args)
def test_check_input_bam_consistent_length_negRaisesUsageError(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|5M|=|300|200|AAANN|>>>!!
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=False)
regex = (r'\[.*input.bam\] reads appear to have inconsistent '
r'sequence lengths\..*force')
self.assertRaisesRegexp(UsageError,
regex,
validator._check_input_bam_consistent_length,
args)
def test_check_input_bam_consistent_length_warnIfForced(self):
sam_contents = \
'''@HD|VN:1.4|GO:none|SO:coordinate
@SQ|SN:chr10|LN:135534747
readNameA1|99|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|99|chr10|100|20|8M|=|300|200|AAAAANNN|>>>>>!!!
readNameA3|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA4|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA5|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA6|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA7|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA8|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA9|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA0|99|chr10|100|20|5M|=|300|200|AAAAA|>>>>>
readNameA1|147|chr10|100|20|10M|=|300|200|AAAAANNNNN|>>>>>!!!!!
readNameA2|147|chr10|100|20|5M|=|300|200|AAANN|>>>!!
readNameA3|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA4|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA5|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA6|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA7|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA8|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA9|147|chr10|100|20|3M|=|300|200|AAA|>>>
readNameA0|147|chr10|100|20|3M|=|300|200|AAA|>>>
'''
sam_contents = sam_contents.replace("|", "\t")
with TempDirectory() as tmp_dir:
input_bam_path = self.create_bam(tmp_dir.path,
"input.sam",
sam_contents,
index=True)
args = Namespace(input_bam=input_bam_path, force=True)
validator._check_input_bam_consistent_length(args, self.mock_logger)
self.ok()
warnings = self.mock_logger._log_calls['WARNING']
self.assertEqual(1, len(warnings))
regex = (r'\[.*input.bam\] reads appear to have inconsistent '
r'sequence lengths\..*forcing')
self.assertRegexpMatches(warnings[0], regex)
def test_check_overwrite_output_ok(self):
with TempDirectory() as tmp_dir:
# tmp_dir.write('input.bam', b'foo')
deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
args = Namespace(output_bam=deduped_bam_path,
annotated_output_bam=annotated_bam_path,
force=False)
validator._check_overwrite_output(args, self.mock_logger)
self.ok()
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_raisesUsageErrorIfDedupedPresent(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('deduped.bam', b'foo')
deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
args = Namespace(output_bam=deduped_bam_path,
annotated_output_bam=annotated_bam_path,
force=False)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*deduped.bam\] exist.*force',
validator._check_overwrite_output,
args,
self.mock_logger)
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_raisesUsageErrorIfAnnotatedPresent(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('annotated.bam', b'foo')
deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
args = Namespace(output_bam=deduped_bam_path,
annotated_output_bam=annotated_bam_path,
force=False)
self.assertRaisesRegexp(utils.UsageError,
r'\[.*annotated.bam\] exist.*force',
validator._check_overwrite_output,
args,
self.mock_logger)
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_raisesUsageErrorIfBothPresent(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('deduped.bam', b'foo')
tmp_dir.write('annotated.bam', b'bar')
deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
args = Namespace(output_bam=deduped_bam_path,
annotated_output_bam=annotated_bam_path,
force=False)
regex = r'\[.*deduped.bam, .*annotated.bam\] exist.*force'
self.assertRaisesRegexp(utils.UsageError,
regex,
validator._check_overwrite_output,
args,
self.mock_logger)
self.assertEqual(0, len(self.mock_logger._log_calls))
def test_check_overwrite_output_warnIfForced(self):
with TempDirectory() as tmp_dir:
tmp_dir.write('deduped.bam', b'foo')
tmp_dir.write('annotated.bam', b'bar')
deduped_bam_path = os.path.join(tmp_dir.path, 'deduped.bam')
annotated_bam_path = os.path.join(tmp_dir.path, 'annotated.bam')
args = Namespace(output_bam=deduped_bam_path,
annotated_output_bam=annotated_bam_path,
force=True)
validator._check_overwrite_output(args, self.mock_logger)
warnings = self.mock_logger._log_calls['WARNING']
regex = r'\[.*deduped.bam, .*annotated.bam\] exist.*forcing'
self.assertEqual(1, len(warnings))
self.assertRegexpMatches(warnings[0], regex)

# =============================================================================
# repo: bsilverthorn/qy | path: src/qy/test/test_language.py | license: MIT
# =============================================================================
"""
@author: Bryan Silverthorn <[email protected]>
"""
import math
import numpy
import qy
from nose.tools import (
assert_true,
assert_false,
assert_equal,
assert_raises,
assert_almost_equal,
)
from qy import (
emit_and_execute,
Object,
)
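
# These tests exercise qy's emit_and_execute() decorator: the decorated body
# emits LLVM IR through the qy DSL and is JIT-compiled and run immediately,
# while qy.python(...) calls back into the surrounding Python scope from the
# emitted code.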
def test_qy_python_no_arguments():
"""
Test the python() LLVM construct without arguments.
"""
executed = [False]
@emit_and_execute()
def _():
@qy.python()
def _():
            executed[0] = True
assert_true(executed[0])
def test_qy_python_arguments():
"""
Test the python() LLVM construct with arguments.
"""
values = []
@emit_and_execute()
def _():
@qy.for_(8)
def _(i):
@qy.python(i)
def _(j):
values.append(j)
assert_equal(values, range(8))
def test_qy_python_exception():
"""
Test exception handling in the python() LLVM construct.
"""
class ExpectedException(Exception):
pass
def should_raise():
@emit_and_execute()
def _():
@qy.python()
def _():
raise ExpectedException()
assert_raises(ExpectedException, should_raise)
def test_qy_python_exception_short_circuiting():
"""
Test short-circuiting of exceptions in the python() LLVM construct.
"""
class ExpectedException(Exception):
pass
def should_raise():
@emit_and_execute()
def _():
@qy.python()
def _():
raise ExpectedException()
@qy.python()
def _():
assert_true(False, "control flow was not short-circuited")
assert_raises(ExpectedException, should_raise)
def test_qy_if_():
"""
Test the qy-LLVM if_() construct.
"""
bad = [True]
@emit_and_execute()
def _():
@qy.if_(True)
def _():
@qy.python()
def _():
del bad[:]
assert_false(bad)
@emit_and_execute()
def _():
@qy.if_(False)
def _():
@qy.python()
def _():
assert_true(False)
def test_qy_if_else():
"""
Test the qy-LLVM if_else() construct.
"""
bad = [True]
@emit_and_execute()
def _():
@qy.if_else(True)
def _(then):
if then:
@qy.python()
def _():
del bad[:]
else:
@qy.python()
def _():
assert_true(False)
assert_false(bad)
bad = [True]
@emit_and_execute()
def _():
@qy.if_else(False)
def _(then):
if then:
@qy.python()
def _():
assert_true(False)
else:
@qy.python()
def _():
del bad[:]
assert_false(bad)
def test_qy_for_():
"""
Test the qy-LLVM for_() loop construct.
"""
count = 128
iterations = [0]
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
@qy.python()
def _():
iterations[0] += 1
assert_equal(iterations[0], count)
def test_qy_break_():
"""
Test the qy break_() statement.
"""
count = 64
iterations = [0]
@emit_and_execute()
def _():
@qy.for_(count * 2)
def _(i):
@qy.python()
def _():
iterations[0] += 1
@qy.if_(i == count - 1)
def _():
qy.break_()
assert_equal(iterations[0], count)
def test_qy_object_basics():
"""
Test basic operations on LLVM-wrapped Python objects.
"""
result = [None]
text = "testing"
def do_function(string_py):
result[0] = string_py
@emit_and_execute()
def _():
do = Object.from_object(do_function)
string = Object.from_string(text)
do(string)
assert_equal(result, [text])
def test_qy_py_print():
"""
Test the py_print() LLVM construct with arguments.
"""
import sys
from cStringIO import StringIO
old_stdout = sys.stdout
try:
new_stdout = StringIO()
sys.stdout = new_stdout
@emit_and_execute()
def _():
qy.py_print("test text\n")
finally:
sys.stdout = old_stdout
assert_equal(new_stdout.getvalue(), "test text\n")
def test_qy_py_printf():
"""
Test the py_printf() LLVM construct with arguments.
"""
import sys
from cStringIO import StringIO
old_stdout = sys.stdout
try:
new_stdout = StringIO()
sys.stdout = new_stdout
@emit_and_execute()
def _():
@qy.for_(8)
def _(i):
qy.py_printf("i = %i\n", i)
finally:
sys.stdout = old_stdout
assert_equal(
new_stdout.getvalue(),
"".join("i = %i\n" % i for i in xrange(8)),
)
def test_qy_nested_for_():
"""
Test the qy-LLVM for_() loop construct, nested.
"""
count = 32
iterations = [0]
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
@qy.for_(count)
def _(_):
@qy.python()
def _():
iterations[0] += 1
assert_equal(iterations[0], count**2)
def test_qy_assert_():
"""
Test the qy-LLVM assert_() construct.
"""
# should not raise
@emit_and_execute()
def _():
qy.assert_(True)
# should raise
from qy import EmittedAssertionError
def should_raise():
@emit_and_execute()
def _():
qy.assert_(False)
assert_raises(EmittedAssertionError, should_raise)
def test_qy_random():
"""
Test the qy-LLVM random() construct.
"""
count = 4096
total = [0.0]
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
v = qy.random()
@qy.python(v)
def _(v_py):
total[0] += v_py
assert_almost_equal(total[0] / count, 0.5, places = 1)
def test_qy_random_int():
"""
Test the qy-LLVM random_int() construct.
"""
count = 32
values = []
@emit_and_execute()
def _():
@qy.for_(count)
def _(_):
v = qy.random_int(2)
@qy.python(v)
def _(v_py):
values.append(v_py)
assert_true(len(filter(None, values)) > 8)
assert_true(len(filter(None, values)) < 24)
def test_qy_select():
"""
Test the select() LLVM construct without arguments.
"""
result = [None, None]
@emit_and_execute()
def _():
v0 = qy.select(True, 3, 4)
v1 = qy.select(False, 3, 4)
@qy.python(v0, v1)
def _(v0_py, v1_py):
result[0] = v0_py
result[1] = v1_py
assert_equal(result[0], 3)
assert_equal(result[1], 4)
def test_qy_is_nan():
"""
Test LLVM real-value is_nan property.
"""
@emit_and_execute()
def _():
a = qy.value_from_any(-0.000124992188151).is_nan
b = qy.value_from_any(numpy.nan).is_nan
@qy.python(a, b)
def _(a_py, b_py):
assert_false(a_py)
assert_true(b_py)
def test_qy_log():
"""
Test the LLVM log() intrinsic wrapper.
"""
@emit_and_execute()
def _():
v0 = qy.log(math.e)
@qy.python(v0)
def _(v0_py):
assert_equal(v0_py, 1.0)
def test_qy_log1p():
"""
Test the LLVM log1p() construct.
"""
@emit_and_execute()
def _():
v0 = qy.log1p(math.e - 1.0)
@qy.python(v0)
def _(v0_py):
assert_equal(v0_py, 1.0)
def test_qy_exp():
"""
Test the LLVM exp() intrinsic wrapper.
"""
@emit_and_execute()
def _():
v0 = qy.exp(1.0)
@qy.python(v0)
def _(v0_py):
assert_equal(v0_py, math.e)
def test_qy_real_neg():
"""
Test the floating-point negation operation.
"""
@emit_and_execute()
def _():
x = qy.value_from_any(3)
y = qy.value_from_any(-5)
@qy.python(-x, -y)
def _(a_py, b_py):
assert_equal(a_py, -3)
assert_equal(b_py, 5)
def test_qy_integer_mod():
"""
Test the integer modulo operation.
"""
@emit_and_execute()
def _():
x = qy.value_from_any(3)
y = qy.value_from_any(5)
z = qy.value_from_any(-2)
@qy.python(x % y, y % z, z % y)
def _(a_py, b_py, c_py):
assert_equal(a_py, 3)
assert_equal(b_py, 2)
assert_equal(c_py, -2)

# =============================================================================
# repo: pengzhangdev/PokemonGo-Bot | path: pokemongo_bot/cell_workers/evolve_pokemon.py | license: MIT
# =============================================================================
from random import uniform
from pokemongo_bot import inventory
from pokemongo_bot.human_behaviour import sleep
from pokemongo_bot.inventory import Pokemon
from pokemongo_bot.item_list import Item
from pokemongo_bot.base_task import BaseTask
class EvolvePokemon(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def __init__(self, bot, config):
super(EvolvePokemon, self).__init__(bot, config)
def initialize(self):
self.api = self.bot.api
self.evolve_list = self.config.get('evolve_list', [])
self.donot_evolve_list = self.config.get('donot_evolve_list', [])
self.min_evolve_speed = self.config.get('min_evolve_speed', 25)
self.max_evolve_speed = self.config.get('max_evolve_speed', 30)
self.first_evolve_by = self.config.get('first_evolve_by', 'cp')
self.evolve_above_cp = self.config.get('evolve_above_cp', 500)
self.evolve_above_iv = self.config.get('evolve_above_iv', 0.8)
self.cp_iv_logic = self.config.get('logic', 'or')
self.use_lucky_egg = self.config.get('use_lucky_egg', False)
self._validate_config()
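
    # Illustrative task configuration (keys read above; values hypothetical):
    #   {"evolve_list": "all", "donot_evolve_list": "eevee",
    #    "min_evolve_speed": 25, "max_evolve_speed": 30,
    #    "first_evolve_by": "cp", "evolve_above_cp": 500,
    #    "evolve_above_iv": 0.8, "logic": "or", "use_lucky_egg": false}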
def _validate_config(self):
if isinstance(self.evolve_list, basestring):
self.evolve_list = [str(pokemon_name).strip().lower() for pokemon_name in self.evolve_list.split(',')]
if isinstance(self.donot_evolve_list, basestring):
self.donot_evolve_list = [str(pokemon_name).strip().lower() for pokemon_name in self.donot_evolve_list.split(',')]
if 'evolve_speed' in self.config:
self.logger.warning("evolve_speed is deprecated, instead please use 'min_evolve_speed' and 'max_evolved_speed'.")
if 'evolve_all' in self.config:
self.logger.warning("evolve_all is deprecated, instead please use 'evolve_list' and 'donot_evolve_list'.")
def work(self):
if not self._should_run():
return
filtered_list = self._sort_and_filter()
if (len(self.evolve_list) > 0) and self.evolve_list[0] != 'all':
filtered_list = filter(lambda x: x.name.lower() in self.evolve_list, filtered_list)
if (len(self.donot_evolve_list) > 0) and self.donot_evolve_list[0] != 'none':
filtered_list = filter(lambda pokemon: pokemon.name.lower() not in self.donot_evolve_list, filtered_list)
cache = {}
for pokemon in filtered_list:
if pokemon.can_evolve_now():
self._execute_pokemon_evolve(pokemon, cache)
def _should_run(self):
if not self.evolve_list or self.evolve_list[0] == 'none':
return False
# Evolve all is used - Use Lucky egg only at the first tick
        if self.bot.tick_count != 1 or not self.use_lucky_egg:
return True
lucky_egg = inventory.items().get(Item.ITEM_LUCKY_EGG.value)
# Make sure the user has a lucky egg and skip if not
if lucky_egg.count > 0:
response_dict_lucky_egg = self.bot.use_lucky_egg()
if response_dict_lucky_egg:
result = response_dict_lucky_egg.get('responses', {}).get('USE_ITEM_XP_BOOST', {}).get('result', 0)
                if result == 1:  # Request success
lucky_egg.remove(1)
self.emit_event(
'used_lucky_egg',
formatted='Used lucky egg ({amount_left} left).',
data={
'amount_left': lucky_egg.count
}
)
return True
else:
self.emit_event(
'lucky_egg_error',
level='error',
formatted='Failed to use lucky egg!'
)
return False
else:
# Skipping evolve so they aren't wasted
self.emit_event(
'skip_evolve',
                formatted='Skipping evolve because there is no lucky egg.'
)
return False
def _sort_and_filter(self):
pokemons = []
logic_to_function = {
'or': lambda pokemon: pokemon.cp >= self.evolve_above_cp or pokemon.iv >= self.evolve_above_iv,
'and': lambda pokemon: pokemon.cp >= self.evolve_above_cp and pokemon.iv >= self.evolve_above_iv
}
for pokemon in inventory.pokemons().all():
if pokemon.unique_id > 0 and pokemon.has_next_evolution() and (logic_to_function[self.cp_iv_logic](pokemon)):
pokemons.append(pokemon)
if self.first_evolve_by == "cp":
pokemons.sort(key=lambda x: (x.pokemon_id, x.cp, x.iv), reverse=True)
else:
pokemons.sort(key=lambda x: (x.pokemon_id, x.iv, x.cp), reverse=True)
return pokemons
def _execute_pokemon_evolve(self, pokemon, cache):
if pokemon.name in cache:
return False
response_dict = self.api.evolve_pokemon(pokemon_id=pokemon.unique_id)
if response_dict.get('responses', {}).get('EVOLVE_POKEMON', {}).get('result', 0) == 1:
xp = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("experience_awarded", 0)
evolution = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("evolved_pokemon_data", {})
awarded_candies = response_dict.get('responses', {}).get('EVOLVE_POKEMON', {}).get('candy_awarded', 0)
candy = inventory.candies().get(pokemon.pokemon_id)
candy.consume(pokemon.evolution_cost - awarded_candies)
self.emit_event(
'pokemon_evolved',
formatted="Evolved {pokemon} [IV {iv}] [CP {cp}] [{candy} candies] [+{xp} xp]",
data={
'pokemon': pokemon.name,
'iv': pokemon.iv,
'cp': pokemon.cp,
'candy': candy.quantity,
'xp': xp,
}
)
inventory.pokemons().remove(pokemon.unique_id)
new_pokemon = inventory.Pokemon(evolution)
inventory.pokemons().add(new_pokemon)
inventory.player().exp += xp
sleep(uniform(self.min_evolve_speed, self.max_evolve_speed))
evolve_result = True
else:
# cache pokemons we can't evolve. Less server calls
cache[pokemon.name] = 1
sleep(0.7)
evolve_result = False
with self.bot.database as conn:
c = conn.cursor()
c.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='evolve_log'")
result = c.fetchone()
while True:
if result[0] == 1:
conn.execute('''INSERT INTO evolve_log (pokemon, iv, cp) VALUES (?, ?, ?)''', (pokemon.name, pokemon.iv, pokemon.cp))
break
else:
self.emit_event(
'evolve_log',
sender=self,
level='info',
formatted="evolve_log table not found, skipping log"
)
break
return evolve_result
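# ---------------------------------------------------------------------------
# Hypothetical configuration sketch (not part of the original file). The keys
# mirror the ones read in initialize() above; the surrounding task wiring
# depends on the bot's config loader, so treat this as illustration only:
#
#     "tasks": [{
#         "type": "EvolvePokemon",
#         "config": {
#             "evolve_list": "caterpie, weedle, pidgey",
#             "donot_evolve_list": "none",
#             "min_evolve_speed": 25,
#             "max_evolve_speed": 30,
#             "first_evolve_by": "cp",
#             "evolve_above_cp": 500,
#             "evolve_above_iv": 0.8,
#             "logic": "or",
#             "use_lucky_egg": false
#         }
#     }]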
| mit | -3,628,386,126,115,556,400 | 40.682081 | 133 | 0.554431 | false |
blstream/ut-arena | ut_arena_py_api/ut_arena/settings.py | 1 | 3193 | """
Django settings for ut_arena_py_api project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!2stj*=!93mhvadu7moo(^ak6(jkl&(y*%q59l=7qj(5+n*-r)'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'apps.utarena',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ut_arena.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ut_arena.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# Rest settings
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
)
}
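# ---------------------------------------------------------------------------
# Request sketch (assumption, not part of the original file): with
# TokenAuthentication enabled above, clients authenticate by sending a token
# issued through rest_framework.authtoken, e.g.:
#
#     curl -H "Authorization: Token <key>" http://localhost:8000/<endpoint>/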
| apache-2.0 | 7,903,634,078,388,148,000 | 26.765217 | 91 | 0.68932 | false |
brean/python-pathfinding | pathfinding/finder/finder.py | 1 | 6586 | # -*- coding: utf-8 -*-
import heapq  # used for the so called "open list" that stores known nodes
import time # for time limitation
from pathfinding.core.util import SQRT2
from pathfinding.core.diagonal_movement import DiagonalMovement
# max. amount of tries we iterate until we abort the search
MAX_RUNS = float('inf')
# max. time after which we abort the search (in seconds)
TIME_LIMIT = float('inf')
# used for backtrace of bi-directional A*
BY_START = 1
BY_END = 2
class ExecutionTimeException(Exception):
def __init__(self, message):
super(ExecutionTimeException, self).__init__(message)
class ExecutionRunsException(Exception):
def __init__(self, message):
super(ExecutionRunsException, self).__init__(message)
class Finder(object):
def __init__(self, heuristic=None, weight=1,
diagonal_movement=DiagonalMovement.never,
weighted=True,
time_limit=TIME_LIMIT,
max_runs=MAX_RUNS):
"""
find shortest path
:param heuristic: heuristic used to calculate distance of 2 points
(defaults to manhattan)
:param weight: weight for the edges
:param diagonal_movement: if diagonal movement is allowed
(see enum in diagonal_movement)
:param weighted: the algorithm supports weighted nodes
(should be True for A* and Dijkstra)
:param time_limit: max. runtime in seconds
:param max_runs: max. amount of tries until we abort the search
        (optional, only if we enter huge grids and have time constraints)
        <=0 means there are no constraints and the code might run on any
large map.
"""
self.time_limit = time_limit
self.max_runs = max_runs
self.weighted = weighted
self.diagonal_movement = diagonal_movement
self.weight = weight
self.heuristic = heuristic
def calc_cost(self, node_a, node_b):
"""
get the distance between current node and the neighbor (cost)
"""
if node_b.x - node_a.x == 0 or node_b.y - node_a.y == 0:
# direct neighbor - distance is 1
ng = 1
else:
# not a direct neighbor - diagonal movement
ng = SQRT2
# weight for weighted algorithms
if self.weighted:
ng *= node_b.weight
return node_a.g + ng
def apply_heuristic(self, node_a, node_b, heuristic=None):
"""
helper function to apply heuristic
"""
if not heuristic:
heuristic = self.heuristic
return heuristic(
abs(node_a.x - node_b.x),
abs(node_a.y - node_b.y))
def find_neighbors(self, grid, node, diagonal_movement=None):
'''
        find neighbors, same for Dijkstra, A*, Bi-A*, IDA*
'''
if not diagonal_movement:
diagonal_movement = self.diagonal_movement
return grid.neighbors(node, diagonal_movement=diagonal_movement)
def keep_running(self):
"""
        check if we run into time or iteration constraints.
:returns: True if we keep running and False if we run into a constraint
"""
if self.runs >= self.max_runs:
raise ExecutionRunsException(
'{} run into barrier of {} iterations without '
'finding the destination'.format(
self.__class__.__name__, self.max_runs))
if time.time() - self.start_time >= self.time_limit:
raise ExecutionTimeException(
'{} took longer than {} seconds, aborting!'.format(
self.__class__.__name__, self.time_limit))
def process_node(self, node, parent, end, open_list, open_value=True):
'''
        we check if the given node is part of the path by calculating its
        cost and adding or removing it from our path
:param node: the node we like to test
(the neighbor in A* or jump-node in JumpPointSearch)
:param parent: the parent node (the current node we like to test)
:param end: the end point to calculate the cost of the path
:param open_list: the list that keeps track of our current path
:param open_value: needed if we like to set the open list to something
else than True (used for bi-directional algorithms)
'''
# calculate cost from current node (parent) to the next node (neighbor)
ng = self.calc_cost(parent, node)
if not node.opened or ng < node.g:
node.g = ng
node.h = node.h or \
self.apply_heuristic(node, end) * self.weight
# f is the estimated total cost from start to goal
node.f = node.g + node.h
node.parent = parent
if not node.opened:
heapq.heappush(open_list, node)
node.opened = open_value
else:
# the node can be reached with smaller cost.
# Since its f value has been updated, we have to
# update its position in the open list
open_list.remove(node)
heapq.heappush(open_list, node)
def check_neighbors(self, start, end, grid, open_list,
open_value=True, backtrace_by=None):
"""
find next path segment based on given node
(or return path if we found the end)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:param open_list: stores nodes that will be processed next
"""
raise NotImplementedError(
'Please implement check_neighbors in your finder')
def find_path(self, start, end, grid):
"""
find a path from start to end node on grid by iterating over
all neighbors of a node (see check_neighbors)
:param start: start node
:param end: end node
:param grid: grid that stores all possible steps/tiles as 2D-list
:return:
"""
self.start_time = time.time() # execution time limitation
self.runs = 0 # count number of iterations
start.opened = True
open_list = [start]
while len(open_list) > 0:
self.runs += 1
self.keep_running()
path = self.check_neighbors(start, end, grid, open_list)
if path:
return path, self.runs
# failed to find path
return [], self.runs
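# ---------------------------------------------------------------------------
# Minimal usage sketch (assumes the package layout of this repository; Grid
# and AStarFinder are the package's own public API; not part of the
# original file).
if __name__ == '__main__':
    from pathfinding.core.grid import Grid
    from pathfinding.finder.a_star import AStarFinder

    matrix = [[1, 1, 1],
              [1, 0, 1],  # 0 marks an unwalkable cell
              [1, 1, 1]]
    grid = Grid(matrix=matrix)
    finder = AStarFinder(diagonal_movement=DiagonalMovement.never)
    path, runs = finder.find_path(grid.node(0, 0), grid.node(2, 2), grid)
    print('path after %d iterations: %s' % (runs, path))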
| mit | -7,266,928,208,447,150,000 | 35.588889 | 79 | 0.58928 | false |
django-id/website | app_author/models.py | 1 | 2195 | from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
# CUSTOM FILE SIZE VALIDATOR
def validate_image(fieldfile_obj):
"""
Limit image size upload
"""
filesize = fieldfile_obj.file.size
megabyte_limit = 0.5
if filesize > megabyte_limit*1024*1024:
raise ValidationError("Max file size is %sMB" % str(megabyte_limit))
class Profile(models.Model):
"""
Author Model
"""
user = models.OneToOneField(
User,
on_delete=models.CASCADE
)
profile_picture = models.ImageField(
upload_to='images/%Y/%m/%d',
validators=[validate_image],
blank=True,
null=True
)
profile_name = models.CharField(
verbose_name='Name',
null=True,
blank=True,
max_length=50
)
profile_email = models.EmailField(
verbose_name='Email Address',
null=True,
blank=True
)
profile_location = models.CharField(
verbose_name='Origin/City',
null=True,
blank=True,
max_length=50
)
profile_github = models.URLField(
verbose_name='Github URL',
null=True,
blank=True
)
slug = models.SlugField()
is_created = models.DateTimeField(
null=True,
blank=True
)
is_moderator = models.BooleanField(
default=False,
)
def __str__(self):
return str(self.user)
def save(self, **kwargs):
if not self.slug:
from djangoid.utils import get_unique_slug
self.slug = get_unique_slug(instance=self, field='profile_name')
super(Profile, self).save(**kwargs)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
"""
Automatically Create User when Login
"""
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
"""
Automatically Create User when Login
"""
instance.profile.save()
| mit | -1,107,724,903,978,328,600 | 21.397959 | 76 | 0.618223 | false |
mhl/mysociety-cvs | sitestats/pylib/sitestats/backports/contrib/auth/middleware.py | 1 | 2933 | from django.contrib import auth
from django.core.exceptions import ImproperlyConfigured
class RemoteUserMiddleware(object):
"""
Middleware for utilizing web-server-provided authentication.
If request.user is not authenticated, then this middleware attempts to
authenticate the username passed in the ``REMOTE_USER`` request header.
If authentication is successful, the user is automatically logged in to
persist the user in the session.
The header used is configurable and defaults to ``REMOTE_USER``. Subclass
this class and change the ``header`` attribute if you need to use a
different header.
"""
# Name of request header to grab username from. This will be the key as
# used in the request.META dictionary, i.e. the normalization of headers to
# all uppercase and the addition of "HTTP_" prefix apply.
header = "REMOTE_USER"
def process_request(self, request):
# AuthenticationMiddleware is required so that request.user exists.
if not hasattr(request, 'user'):
raise ImproperlyConfigured(
"The Django remote user auth middleware requires the"
" authentication middleware to be installed. Edit your"
" MIDDLEWARE_CLASSES setting to insert"
" 'django.contrib.auth.middleware.AuthenticationMiddleware'"
" before the RemoteUserMiddleware class.")
try:
username = request.META[self.header]
except KeyError:
# If specified header doesn't exist then return (leaving
# request.user set to AnonymousUser by the
# AuthenticationMiddleware).
return
# If the user is already authenticated and that user is the user we are
# getting passed in the headers, then the correct user is already
# persisted in the session and we don't need to continue.
if request.user.is_authenticated():
if request.user.username == self.clean_username(username, request):
return
# We are seeing this user for the first time in this session, attempt
# to authenticate the user.
user = auth.authenticate(remote_user=username)
if user:
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
auth.login(request, user)
def clean_username(self, username, request):
"""
Allows the backend to clean the username, if the backend defines a
clean_username method.
"""
backend_str = request.session[auth.BACKEND_SESSION_KEY]
backend = auth.load_backend(backend_str)
try:
username = backend.clean_username(username)
except AttributeError: # Backend has no clean_username method.
pass
return username
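# ---------------------------------------------------------------------------
# Sketch (not part of the original module): to consume a different
# web-server header, subclass and override ``header`` with the normalized
# request.META key (all-uppercase, "HTTP_"-prefixed for real HTTP headers).
class CustomHeaderRemoteUserMiddleware(RemoteUserMiddleware):
    header = 'HTTP_AUTHUSER'  # hypothetical header name, for illustration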
| agpl-3.0 | -5,813,389,405,922,956,000 | 42.776119 | 79 | 0.650869 | false |
perfidia/seleshot | doc/gen_api.py | 1 | 3756 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import os
import sys
import string
sys.path.append('../src')
import seleshot
TEMPLATE = """===
API
===
"""
OUTPUT = os.path.join("_static", "api.txt")
# from http://legacy.python.org/dev/peps/pep-0257/
def trim(docstring):
if not docstring:
return ''
# Convert tabs to spaces (following the normal Python rules)
# and split into a list of lines:
lines = docstring.expandtabs().splitlines()
# Determine minimum indentation (first line doesn't count):
indent = sys.maxint
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxint:
for line in lines[1:]:
trimmed.append(line[indent:].rstrip())
# Strip off trailing and leading blank lines:
while trimmed and not trimmed[-1]:
trimmed.pop()
while trimmed and not trimmed[0]:
trimmed.pop(0)
# Return a single string:
return '\n'.join(trimmed)
def fmt(doc, indent = 8):
return "\n".join([" " * indent + i for i in trim(doc).split("\n")])
if __name__ == '__main__':
print "Generating...",
s = seleshot.create()
s.driver.get("http://example.com")
i = s.get_screen()
fd = open(OUTPUT, "w")
###########################################################################
fd.write(TEMPLATE)
fd.write(" " * 0 + ".. autofunction:: seleshot.create")
fd.write("\n\n")
fd.write(" " * 0 + ".. class:: ScreenShot(object):")
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: get_screen(self, url = None):\n\n")
fd.write(fmt(s.get_screen.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: close(self):\n\n")
fd.write(fmt(s.close.__doc__))
fd.write("\n\n")
fd.write(" " * 0 + ".. class:: ImageContainer(object):\n\n")
fd.write(fmt(i.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: cut_element(self, id = None, xpath = None):\n\n")
fd.write(fmt(i.cut_element.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: cut_area(self, x = 0, y = 0, height = None, width = None):\n\n")
fd.write(fmt(i.cut_area.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_dot(self, id = None, xpath = None, coordinates = None, padding = 0, color = None, size = None):\n\n")
fd.write(fmt(i.draw_dot.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_frame(self, id = None, xpath = None, coordinates = None, padding = None, color = None, size = None):\n\n")
fd.write(fmt(i.draw_frame.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_image(self, id = None, xpath = None, coordinates = None, position = Position.MIDDLE, padding = (0, 0), filename = None, image = None):\n\n")
fd.write(fmt(i.draw_image.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_zoom(self, id = None, xpath = None, coordinates = None, position = Position.MIDDLE, padding = (0, 0), zoom = None):\n\n")
fd.write(fmt(i.draw_zoom.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: draw_blur(self, id = None, xpath = None):\n\n")
fd.write(fmt(i.draw_blur.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: save(self, filename):\n\n")
fd.write(fmt(i.save.__doc__))
fd.write("\n\n")
fd.write(" " * 4 + ".. function:: is_cut(self):\n\n")
fd.write(fmt(i.is_cut.__doc__))
fd.write("\n\n")
##########################################################################
fd.close()
s.close()
print "done"
| mit | 3,779,118,023,028,133,400 | 28.809524 | 183 | 0.536741 | false |
nanshihui/PocCollect | component/JDWP/JDWPvul.py | 1 | 2106 | #!/usr/bin/env python
# encoding: utf-8
from t import T
import os
import platform
import subprocess
import signal
import time
import requests,urllib2,json,urlparse
class TimeoutError(Exception):
pass
def command(cmd, timeout=60):
"""Run command and return the output
cmd - the command to run
timeout - max seconds to wait for
"""
is_linux = platform.system() == 'Linux'
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid if is_linux else None)
if timeout==0:
return p.stdout.read()
t_beginning = time.time()
seconds_passed = 0
while True:
if p.poll() is not None:
break
seconds_passed = time.time() - t_beginning
if timeout and seconds_passed > timeout:
if is_linux:
os.killpg(p.pid, signal.SIGTERM)
else:
p.terminate()
raise TimeoutError(cmd, timeout)
time.sleep(0.1)
return p.stdout.read()
class P(T):
def __init__(self):
T.__init__(self)
def verify(self,head='',context='',ip='',port='',productname={},keywords='',hackinfo=''):
result = {}
result['result']=False
usecommand='python '+os.path.split(os.path.realpath(__file__))[0]+'/script/jdwpshellifier.py -t '+ip+' -p '+port
try:
print usecommand
msgresult = command(usecommand, timeout=40)
print msgresult
if 'Command successfully executed' in msgresult:
result['result']=True
result['VerifyInfo'] = {}
result['VerifyInfo']['type']='Java Debug Wire Protocol vul'
result['VerifyInfo']['URL'] =ip+':'+port
result['VerifyInfo']['payload']='Java Debug Wire Protocol poc'
result['VerifyInfo']['result'] =msgresult
else:
pass
except Exception,e:
            print e
finally:
return result
if __name__ == '__main__':
print P().verify(ip='120.24.243.216',port='8001')
| mit | -8,082,791,348,985,507,000 | 31.4 | 135 | 0.57265 | false |
jaor/bigmler | bigmler/tests/test_15_delete_dir.py | 1 | 2579 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing delete subcommand, --from-dir option
"""
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module, teardown_class)
import bigmler.tests.delete_subcommand_steps as test_delete
def setup_module():
"""Setup for the module
"""
common_setup_module()
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestDeleteDir(object):
def teardown(self):
"""Calling generic teardown for every method
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
teardown_class()
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def test_scenario1(self):
"""
Scenario: Sucessfully deleting resources from a directory:
Given I store the number of existing resources
And I create BigML resources uploading train "<data>" storing results in "<output_dir>"
And I check that the number of resources has changed
And I delete the resources from the output directory
Then the number of resources has not changed
Examples:
| data | output_dir
| ../data/iris.csv | ./scenario_del_10
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', 'scenario_del_10']]
for example in examples:
print("\nTesting with:\n", example)
test_delete.i_store_the_number_of_resources(self)
test_delete.i_create_source_from_file(self, data=example[0], output_dir=example[1])
test_delete.i_check_changed_number_of_resources(self)
test_delete.i_delete_resources_from_dir(self)
test_delete.i_check_equal_number_of_resources(self)
| apache-2.0 | -8,138,172,125,241,268,000 | 32.493506 | 103 | 0.609539 | false |
glmcdona/meddle | examples/example_deviceiocontrol/processes.py | 1 | 1377 | from process_base import *
from targets import *
import subprocess
import os
class ProcessDeviceIo(ProcessBase):
def __init__(self, Controller, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose, logger):
# Specific options
self.path_to_exe = b"C:\\Windows\\System32\\notepad.exe"
self.command_line = b"notepad.exe"
self.logger = logger
# Initialize
self.initialize(Controller, self.__class__.__name__, crashdump_folder, breakpoint_handler, pid, ph, unique_identifier, verbose)
def on_debugger_attached(self, Engine):
# Set the types
self.Engine = Engine
self.types = meddle_types(Engine)
# Add the targets
Engine.AddTarget(Target_Handles)
Engine.AddTarget(Target_DeviceIoControl)
# Handle process loaded
Engine.HandleProcessLoaded()
# Start an auto-it script
try:
subprocess.Popen(['autoit3.exe', os.path.join(os.path.dirname(__file__), "..", "autoit", "notepad_print.au3"), str(self.pid), ">nul"], shell=True)
except:
print "Warning: autoit3.exe not found on path. Please install it and add it to path to increase the attack surface."
# Resume the process that we created suspended. This is called just after the debugger has been attached.
if self.start_th >= 0:
windll.kernel32.ResumeThread(self.start_th);
def log_csv(self, fields):
self.logger.log_event(fields)
| mit | 4,168,101,267,918,378,500 | 28.319149 | 149 | 0.713145 | false |
building39/nebula2 | scripts/cdmi_explorer/CDMIMain/handlers.py | 1 | 2951 | '''
Created on Jun 9, 2013
@author: mmartin
'''
import sys
from gi.repository import Gtk
from CDMIAbout import CDMIAbout
from CDMIConnect import CDMIConnect
from CDMIHelp import CDMIHelp
class Handlers(object):
'''
classdocs
'''
def __init__(self, session):
self.session = session
def onAbout(self, *args):
CDMIAbout(self.session)
def onConnect(self, *args):
CDMIConnect(self.session)
def onDeleteWindow(self, *args):
self.onQuit(*args)
def onHelp(self, *args):
CDMIHelp(self.session)
def onQuit(self, *args):
Gtk.main_quit()
def onCDMIRowCollapsed(self, *args):
treeview = args[0]
treeiter = args[1]
treepath = args[2]
model = treeview.get_model()
data = self.session.GET(model[treeiter][1])
self.session.get_children(treeview, treepath, data)
self.session.display_cdmi_data(data)
def onCDMIRowExpanded(self, *args):
treeview = args[0]
treeiter = args[1]
treepath = args[2]
rowname = self._squash_slashes(self.session.cdmimodel.get_value(treeiter, 1))
data = self.session.GET(rowname)
treeiter = self.session.cdmimodel.get_iter(treepath)
model = treeview.get_model()
prefix = rowname
if model.iter_has_child(treeiter):
num_children = model.iter_n_children(treeiter)
for i in range(num_children):
if not data:
break
child = data['children'][i]
childpath = self._squash_slashes('%s/%s' % (prefix, child))
childdata = self.session.GET(childpath)
childiter = model.iter_nth_child(treeiter, i)
self.session.get_children(treeview,
model.get_path(childiter),
childdata)
self.session.display_cdmi_data(data)
return
def onCDMIRowActivated(self, *args):
'''
Display the CDMI data for the selected row.
'''
treeview = args[0]
treepath = args[1]
_column = args[2]
model = treeview.get_model()
treeiter = model.get_iter(treepath)
data = self.session.GET(model[treeiter][1])
self.session.get_children(treeview, treepath, data)
self.session.display_cdmi_data(data)
def onSelectCursorRow(self, *args):
print 'onSelectCursorRow args: %s' % args
sys.stdout.flush()
def onCursorChanged(self, *args):
print 'onCursorChanged args: %s' % args
sys.stdout.flush()
    def _squash_slashes(self, S):
        # collapse each run of consecutive slashes down to a single '/'
        T = ""
        for i in range(len(S)):
            if S[i] == '/' and i + 1 < len(S) and S[i + 1] == '/':
                continue
            T = T + S[i]
        return T
| apache-2.0 | -8,973,868,036,584,532,000 | 27.375 | 85 | 0.54795 | false |
cliburn/flow | src/plugins/statistics/summary.py | 1 | 1069 | """Provide summary statistics on data."""
from plugin import Statistics
from numpy import min, max, mean, median, std
class Summary(Statistics):
"""Plugin to display summary statistics"""
name = "Summary"
def Main(self, model):
"""Calculate summary statistics"""
self.model = model
fields = self.model.GetCurrentData().getAttr('fields')
data = self.model.GetCurrentData()[:]
low = list(min(data, axis=0))
high = list(max(data, axis=0))
mu = list(mean(data, axis=0))
        med = list(median(data, axis=0))
sig = list(std(data, axis=0))
self.model.NewGroup('Summary statistics')
self.model.hdf5.createArray(self.model.current_group, 'min', low)
self.model.hdf5.createArray(self.model.current_group, 'max', high)
self.model.hdf5.createArray(self.model.current_group, 'mean', mu)
self.model.hdf5.createArray(self.model.current_group, 'median', med)
self.model.hdf5.createArray(self.model.current_group, 'stdev', sig)
self.model.update()
| gpl-3.0 | -8,669,594,832,415,969,000 | 41.76 | 76 | 0.63985 | false |
mementum/backtrader | backtrader/analyzers/logreturnsrolling.py | 1 | 5020 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import math
import backtrader as bt
__all__ = ['LogReturnsRolling']
class LogReturnsRolling(bt.TimeFrameAnalyzerBase):
'''This analyzer calculates rolling returns for a given timeframe and
compression
Params:
- ``timeframe`` (default: ``None``)
If ``None`` the ``timeframe`` of the 1st data in the system will be
used
Pass ``TimeFrame.NoTimeFrame`` to consider the entire dataset with no
time constraints
- ``compression`` (default: ``None``)
      Only used for sub-day timeframes, for example to work on an hourly
      timeframe by specifying "TimeFrame.Minutes" and 60 as compression
If ``None`` then the compression of the 1st data of the system will be
used
- ``data`` (default: ``None``)
Reference asset to track instead of the portfolio value.
.. note:: this data must have been added to a ``cerebro`` instance with
``addata``, ``resampledata`` or ``replaydata``
- ``firstopen`` (default: ``True``)
When tracking the returns of a ``data`` the following is done when
crossing a timeframe boundary, for example ``Years``:
- Last ``close`` of previous year is used as the reference price to
see the return in the current year
      The problem is the 1st calculation, because the data has **no
      previous** closing price. As such and when this parameter is ``True``
the *opening* price will be used for the 1st calculation.
This requires the data feed to have an ``open`` price (for ``close``
the standard [0] notation will be used without reference to a field
price)
Else the initial close will be used.
- ``fund`` (default: ``None``)
If ``None`` the actual mode of the broker (fundmode - True/False) will
be autodetected to decide if the returns are based on the total net
asset value or on the fund value. See ``set_fundmode`` in the broker
documentation
Set it to ``True`` or ``False`` for a specific behavior
Methods:
- get_analysis
Returns a dictionary with returns as values and the datetime points for
each return as keys
'''
params = (
('data', None),
('firstopen', True),
('fund', None),
)
def start(self):
super(LogReturnsRolling, self).start()
if self.p.fund is None:
self._fundmode = self.strategy.broker.fundmode
else:
self._fundmode = self.p.fund
self._values = collections.deque([float('Nan')] * self.compression,
maxlen=self.compression)
if self.p.data is None:
# keep the initial portfolio value if not tracing a data
if not self._fundmode:
self._lastvalue = self.strategy.broker.getvalue()
else:
self._lastvalue = self.strategy.broker.fundvalue
def notify_fund(self, cash, value, fundvalue, shares):
if not self._fundmode:
self._value = value if self.p.data is None else self.p.data[0]
else:
self._value = fundvalue if self.p.data is None else self.p.data[0]
def _on_dt_over(self):
# next is called in a new timeframe period
if self.p.data is None or len(self.p.data) > 1:
# Not tracking a data feed or data feed has data already
vst = self._lastvalue # update value_start to last
else:
# The 1st tick has no previous reference, use the opening price
vst = self.p.data.open[0] if self.p.firstopen else self.p.data[0]
self._values.append(vst) # push values backwards (and out)
def next(self):
# Calculate the return
super(LogReturnsRolling, self).next()
self.rets[self.dtkey] = math.log(self._value / self._values[0])
self._lastvalue = self._value # keep last value
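# ---------------------------------------------------------------------------
# Usage sketch (standard backtrader wiring; not part of the original file).
#
#     cerebro = bt.Cerebro()
#     cerebro.adddata(data)  # any data feed
#     cerebro.addanalyzer(LogReturnsRolling, _name='logret',
#                         timeframe=bt.TimeFrame.Days, compression=1)
#     strat = cerebro.run()[0]
#     print(strat.analyzers.logret.get_analysis())  # {datetime: log-return}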
| gpl-3.0 | -3,908,883,812,775,189,000 | 34.857143 | 79 | 0.60996 | false |
codilime/cloudify-agent | cloudify_agent/installer/config/decorators.py | 1 | 5377 | #########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from functools import wraps
from cloudify import ctx
from cloudify import context
from cloudify_agent.installer.config.attributes import AGENT_ATTRIBUTES
def attribute(name):
def decorator(function):
@wraps(function)
def wrapper(cloudify_agent):
# if the property was given in the invocation, use it.
# inputs are first in precedence order
if _update_agent_property(name,
props=cloudify_agent,
final_props=cloudify_agent):
return
if ctx.type == context.NODE_INSTANCE:
# if the property is inside a runtime property, use it.
# runtime properties are second in precedence order
runtime_properties = ctx.instance.runtime_properties.get(
'cloudify_agent', {})
if _update_agent_property(name,
props=runtime_properties,
final_props=cloudify_agent):
return
# if the property is declared on the node, use it
# node properties are third in precedence order
node_properties = ctx.node.properties.get(
'cloudify_agent', {})
node_properties.update(ctx.node.properties.get(
'agent_config', {}))
if _update_agent_property(name,
props=node_properties,
final_props=cloudify_agent):
return
# if the property is inside the bootstrap context,
# and its value is not None, use it
# bootstrap_context is forth in precedence order
attr = AGENT_ATTRIBUTES.get(name)
if attr is None:
raise RuntimeError('{0} is not an agent attribute'
.format(name))
agent_context = ctx.bootstrap_context.cloudify_agent.\
_cloudify_agent or {}
context_attribute = attr.get('context_attribute', name)
if _update_agent_property(context_attribute,
props=agent_context,
final_props=cloudify_agent,
final_key=name):
return
if _update_agent_property(name,
props=agent_context,
final_props=cloudify_agent):
return
# apply the function itself
ctx.logger.debug('Applying function:{0} on Attribute '
'<{1}>'.format(function.__name__, name))
value = function(cloudify_agent)
if value is not None:
ctx.logger.debug('{0} set by function:{1}'
.format(name, value))
cloudify_agent[name] = value
return
# set default value
default = attr.get('default')
if default is not None:
ctx.logger.debug('{0} set by default value'
.format(name, value))
cloudify_agent[name] = default
return
return wrapper
return decorator
def group(name):
def decorator(group_function):
@wraps(group_function)
def wrapper(cloudify_agent, *args, **kwargs):
# collect all attributes belonging to that group
group_attributes = {}
for attr_name, attr_value in AGENT_ATTRIBUTES.iteritems():
if attr_value.get('group') == name:
group_attributes[attr_name] = attr_value
for group_attr_name in group_attributes.iterkeys():
# iterate and try to set all the attributes of the group as
# defined in the heuristics of @attribute.
@attribute(group_attr_name)
def setter(_):
pass
setter(cloudify_agent)
# when we are done, invoke the group function to
# apply group logic
group_function(cloudify_agent, *args, **kwargs)
return wrapper
return decorator
def _update_agent_property(name, props, final_props, final_key=None):
final_key = final_key or name
extra_props = props.get('extra', {})
if name in extra_props:
final_props[final_key] = extra_props[name]
return True
if name in props:
final_props[final_key] = props[name]
return True
return False
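# ---------------------------------------------------------------------------
# Illustration only (hypothetical attribute name; not part of the original
# file): a config function decorated with @attribute is invoked only when
# none of the precedence sources (inputs, runtime properties, node
# properties, bootstrap context) supplied a value.
#
#     @attribute('user')
#     def prepare_user(cloudify_agent):
#         # fallback computed value, stored as cloudify_agent['user']
#         return 'cloudify'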
| apache-2.0 | -2,597,727,802,150,284,000 | 36.340278 | 77 | 0.53766 | false |
liubenyuan/vispy-tutorial | examples/04-tetrahedron.py | 1 | 4445 | # pylint: disable=invalid-name, no-member, unused-argument
""" passing varyings to fragment """
import numpy as np
from vispy import app, gloo
from vispy.util.transforms import translate, perspective, rotate
# note the 'a_color' attribute and the 'v_color' varying in the vertex shader
vertex = """
uniform mat4 u_model; // Model matrix
uniform mat4 u_view; // View matrix
uniform mat4 u_projection; // Projection matrix
uniform vec4 u_color; // mask color for edge plotting
attribute vec3 a_position;
attribute vec4 a_color;
varying vec4 v_color;
void main()
{
gl_Position = u_projection * u_view * u_model * vec4(a_position, 1.0);
v_color = a_color * u_color;
}
"""
# note the varying 'v_color'; it must have the same name as in the vertex shader.
fragment = """
varying vec4 v_color;
void main()
{
gl_FragColor = v_color;
}
"""
class Canvas(app.Canvas):
""" build canvas class for this demo """
def __init__(self):
""" initialize the canvas """
app.Canvas.__init__(self,
size=(512, 512),
title='scaling quad',
keys='interactive')
# shader program
tet = gloo.Program(vert=vertex, frag=fragment)
# vertices
V = np.array([(0, 0, 0),
(1, 0, 0),
(1.0/2.0, np.sqrt(3.0)/2.0, 0),
(1.0/2.0, np.sqrt(3.0)/6.0, np.sqrt(2.0/3.0))],
dtype=np.float32)
# triangles specified by connecting matrix,
# it can also be initialized using itertools
I = np.array([(0, 1, 2),
(0, 3, 1),
(0, 2, 3),
(1, 3, 2)], dtype=np.uint32)
# edges, used for drawing outline
E = np.array([(0, 1), (1, 2), (2, 0), (1, 3), (2, 3), (0, 3)],
dtype=np.uint32)
# colors of vertices
C = np.array([(1, 0, 0, 1),
(0, 1, 0, 1),
(0, 0, 1, 1),
(1, 1, 0, 1)], dtype=np.float32)
# bind to data
tet['a_position'] = V
tet['a_color'] = C
self.I = gloo.IndexBuffer(I)
self.E = gloo.IndexBuffer(E)
        # initialize transformation matrix
view = np.eye(4, dtype=np.float32)
model = np.eye(4, dtype=np.float32)
projection = np.eye(4, dtype=np.float32)
# set view
view = translate((0, 0, -5))
tet['u_model'] = model
tet['u_view'] = view
tet['u_projection'] = projection
# bind your program
self.program = tet
# config and set viewport
gloo.set_viewport(0, 0, *self.physical_size)
gloo.set_clear_color('white')
gloo.set_state('translucent')
gloo.set_polygon_offset(1.0, 1.0)
# bind a timer
self.timer = app.Timer('auto', self.on_timer)
self.theta = 0.0
self.phi = 0.0
self.timer.start()
# show the canvas
self.show()
def on_resize(self, event):
""" canvas resize callback """
ratio = event.physical_size[0] / float(event.physical_size[1])
self.program['u_projection'] = perspective(45.0, ratio, 2.0, 10.0)
gloo.set_viewport(0, 0, *event.physical_size)
def on_draw(self, event):
""" canvas update callback """
gloo.clear()
# Filled cube
gloo.set_state(blend=True, depth_test=False,
polygon_offset_fill=True)
self.program['u_color'] = [1.0, 1.0, 1.0, 0.8]
self.program.draw('triangles', self.I)
# draw outline
gloo.set_state(blend=False, depth_test=False,
polygon_offset_fill=True)
self.program['u_color'] = [0.0, 0.0, 0.0, 1.0]
self.program.draw('lines', self.E)
def on_timer(self, event):
""" canvas time-out callback """
self.theta += .5
self.phi += .5
# note the convention is, theta is applied first and then phi
# see vispy.utils.transforms,
# python is row-major and opengl is column major,
# so the rotate function transposes the output.
model = np.dot(rotate(self.theta, (0, 1, 0)),
rotate(self.phi, (0, 0, 1)))
self.program['u_model'] = model
self.update()
# Finally, we show the canvas and we run the application.
c = Canvas()
app.run()
| apache-2.0 | 7,396,573,617,910,617,000 | 30.524823 | 74 | 0.525309 | false |
seomoz/simhash-db-py | simhash_db/hbase_client.py | 1 | 3893 | #! /usr/bin/env python
'''Our code to connect to the HBase backend. It uses the happybase
package, which depends on the Thrift service that (for now) is
part of HBase.'''
from gevent import monkey
monkey.patch_all()
import struct
import happybase
import Hbase_thrift
from . import BaseClient
def column_name(integer):
'''Convert an integer to a column name.'''
return 'f%02d:c' % integer
class Client(BaseClient):
'''Our HBase backend client'''
def __init__(self, name, num_blocks, num_bits, *args, **kwargs):
BaseClient.__init__(self, name, num_blocks, num_bits)
# Time to live in seconds
ttl = kwargs.pop('ttl', None)
if ttl is None:
raise ValueError
self.connection = happybase.Connection(**kwargs)
families = {column_name(i): dict(time_to_live=ttl)
for i in range(self.num_tables)}
try:
self.connection.create_table(name, families)
except Hbase_thrift.AlreadyExists:
pass
self.table = self.connection.table(name)
def delete(self):
'''Delete this database of simhashes'''
if self.table is not None:
self.connection.delete_table(self.name, disable=True)
self.table = None
def insert(self, hash_or_hashes):
'''Insert one (or many) hashes into the database'''
if self.table is None:
return
hashes = hash_or_hashes
if not hasattr(hash_or_hashes, '__iter__'):
hashes = [hash_or_hashes]
for hsh in hashes:
for i in range(self.num_tables):
row_key = struct.pack('!Q',
long(self.corpus.tables[i].permute(hsh)))
self.table.put(row_key, {column_name(i): None})
def find_in_table(self, hsh, table_num, ranges):
'''Return all the results found in this particular table'''
low = struct.pack('!Q', ranges[table_num][0])
high = struct.pack('!Q', ranges[table_num][1])
pairs = self.table.scan(row_start=low, row_stop=high,
columns=[column_name(table_num)])
results = [struct.unpack('!Q', k)[0] for k, v in pairs]
results = [self.corpus.tables[table_num].unpermute(d)
for d in results]
return [h for h in results if
self.corpus.distance(h, hsh) <= self.num_bits]
def find_one(self, hash_or_hashes):
'''Find one near-duplicate for the provided query (or queries)'''
if self.table is None:
return None
hashes = hash_or_hashes
if not hasattr(hash_or_hashes, '__iter__'):
hashes = [hash_or_hashes]
results = []
for hsh in hashes:
ranges = self.ranges(hsh)
found = []
for i in range(self.num_tables):
found = self.find_in_table(hsh, i, ranges)
if found:
results.append(found[0])
break
if not found:
results.append(None)
if not hasattr(hash_or_hashes, '__iter__'):
return results[0]
return results
def find_all(self, hash_or_hashes):
'''Find all near-duplicates for the provided query (or queries)'''
if self.table is None:
return None
hashes = hash_or_hashes
if not hasattr(hash_or_hashes, '__iter__'):
hashes = [hash_or_hashes]
results = []
for hsh in hashes:
ranges = self.ranges(hsh)
found = []
for i in range(self.num_tables):
found.extend(self.find_in_table(hsh, i, ranges))
found = list(set(found))
results.append(found)
if not hasattr(hash_or_hashes, '__iter__'):
return results[0]
return results
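# ---------------------------------------------------------------------------
# Usage sketch (assumption: a reachable HBase Thrift server on localhost;
# not part of the original file).
#
#     client = Client('simhashes', num_blocks=6, num_bits=3,
#                     ttl=86400, host='localhost')
#     client.insert(0xDEADBEEF)
#     matches = client.find_all(0xDEADBEEF)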
| mit | 7,742,667,128,239,649,000 | 31.714286 | 79 | 0.553301 | false |
awes0menessInc/python-projects | Alien-Invasion/button.py | 1 | 1269 | import pygame.font
class Button():
""" A class to create a button. """
def __init__(self, screen, msg):
"""Initialize button attributes."""
self.screen = screen
self.screen_rect = screen.get_rect()
# Set the dimensions and properties of the button.
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
# Build the button's rect object and center it.
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
# The button message needs to be prepped only once.
self.prep_msg(msg)
def prep_msg(self, msg):
"""Turn msg into a rendered image and center text on the button."""
self.msg_image = self.font.render(msg, True, self.text_color,
self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
# Draw blank button and then draw message.
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect)
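# ---------------------------------------------------------------------------
# Usage sketch (assumption: a pygame display is available; not part of the
# original file).
if __name__ == '__main__':
    import pygame
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    play_button = Button(screen, "Play")
    play_button.draw_button()
    pygame.display.flip()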
| mit | 4,828,632,466,260,825,000 | 35.257143 | 75 | 0.602049 | false |
msfrank/mandelbrot | mandelbrot/registry.py | 1 | 3001 | # Copyright 2015 Michael Frank <[email protected]>
#
# This file is part of Mandelbrot.
#
# Mandelbrot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mandelbrot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mandelbrot. If not, see <http://www.gnu.org/licenses/>.
import pkg_resources
import logging
log = logging.getLogger("mandelbrot.registry")
from mandelbrot import versionstring
require_mandelbrot = 'mandelbrot == ' + versionstring()
class Registry(object):
"""
"""
def __init__(self):
self.env = pkg_resources.Environment([])
plugins,errors = pkg_resources.working_set.find_plugins(self.env)
for plugin in plugins:
pkg_resources.working_set.add(plugin)
for error in errors:
log.info("failed to load distribution: %s", error)
self.overrides = {}
def override_factory(self, entry_point_type, factory_name, factory):
"""
:param entry_point_type:
:type entry_point_type: str
:param factory_name:
:type factory_name: str
:param factory:
:type factory: type
"""
self.overrides[(entry_point_type,factory_name)] = factory
def lookup_factory(self, entry_point_type, factory_name, factory_type, requirement=require_mandelbrot):
"""
:param entry_point_type:
:type entry_point_type: str
:param factory_name:
:type factory_name: str
:param factory_type:
:type factory_type: type
:param requirement:
:type requirement: str
"""
log.debug("looking up '%s' of type %s with requirement %s", factory_name,
entry_point_type, requirement)
# check factory overrides first
if (entry_point_type,factory_name) in self.overrides:
factory = self.overrides[(entry_point_type,factory_name)]
# find the entrypoint matching the specified requirement
else:
requirement = pkg_resources.Requirement.parse(requirement)
distribution = pkg_resources.working_set.find(requirement)
factory = distribution.load_entry_point(entry_point_type, factory_name)
log.debug("loaded factory %s.%s", factory.__module__, factory.__class__.__name__)
# verify that the factory is the correct type
if not issubclass(factory, factory_type):
raise TypeError("{}.{} is not a subclass of {}".format(
factory.__module__, factory.__class__.__name__, factory_type.__name__))
return factory
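# ---------------------------------------------------------------------------
# Usage sketch (the entry point group and name below are hypothetical; not
# part of the original file).
#
#     registry = Registry()
#     factory = registry.lookup_factory('mandelbrot.check', 'systemload',
#                                       factory_type=object)
#     check = factory()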
| gpl-3.0 | -5,360,309,663,945,176,000 | 39.013333 | 107 | 0.654782 | false |
fga-gpp-mds/2017.2-Receituario-Medico | medical_prescription/exam/test/test_view_list_exam.py | 1 | 2093 | # Django imports
from django.test import TestCase
from django.test.client import RequestFactory
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
# Local Django imports
from exam.views import ListExams
from user.models import User, Patient, HealthProfessional
class ListExamsTest(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.health_professional = HealthProfessional.objects.create_user(email='[email protected]', password='senha12')
self.patient = Patient.objects.create_user(email='[email protected]',
password='senha12',
CEP='72850735',
UF='DF',
city='Brasília',
neighborhood='Asa sul',
complement='Bloco 2 QD 701')
self.user = User.objects.create_user(email='[email protected]', password='senha12')
def teste_exam_get_exam_without_login(self):
request = self.factory.get('/exam/list_exams/')
request.user = AnonymousUser()
response = ListExams.as_view()(request)
self.assertEqual(response.status_code, 302)
def teste_exam_get_exam_with_patient(self):
request = self.factory.get('/exam/list_exams/')
request.user = self.patient
with self.assertRaises(PermissionDenied):
ListExams.as_view()(request)
def teste_exam_get_exam_with_user(self):
request = self.factory.get('/exam/list_exams/')
request.user = self.user
with self.assertRaises(PermissionDenied):
ListExams.as_view()(request)
def teste_exam_get_exam_with_health_professional(self):
request = self.factory.get('/exam/list_exams/')
request.user = self.health_professional
response = ListExams.as_view()(request)
self.assertEqual(response.status_code, 200)
| mit | -3,392,300,360,544,761,000 | 40.019608 | 120 | 0.596558 | false |
rshk/python-pcapng | tests/test_parse_wireshark_capture_files.py | 1 | 10200 | import pytest
from pcapng.blocks import InterfaceDescription, ObsoletePacket, SectionHeader
from pcapng.scanner import FileScanner
def test_sample_test001_ntar():
with open("test_data/test001.ntar", "rb") as fp:
scanner = FileScanner(fp)
blocks = list(scanner)
# There is just a section header
assert len(blocks) == 1
assert blocks[0].endianness == "<"
assert blocks[0].version == (1, 0)
assert blocks[0].length == -1
assert len(blocks[0].options) == 0
assert len(blocks[0].interfaces) == 0
def test_sample_test002_ntar():
with open("test_data/test002.ntar", "rb") as fp:
scanner = FileScanner(fp)
blocks = list(scanner)
# Section header, interface description
assert len(blocks) == 2
assert isinstance(blocks[0], SectionHeader)
assert blocks[0].endianness == "<"
assert blocks[0].version == (1, 0)
assert blocks[0].length == -1
assert len(blocks[0].options) == 0
assert len(blocks[0].interfaces) == 1
assert isinstance(blocks[1], InterfaceDescription)
assert blocks[1].link_type == 0 # Unknown link type
assert blocks[1].snaplen == 0
assert len(blocks[1].options) == 0
def test_sample_test003_ntar():
with open("test_data/test003.ntar", "rb") as fp:
scanner = FileScanner(fp)
blocks = list(scanner)
# Section header, interface description
assert len(blocks) == 2
assert isinstance(blocks[0], SectionHeader)
assert blocks[0].endianness == "<"
assert blocks[0].version == (1, 0)
assert blocks[0].length == -1
assert len(blocks[0].options) == 0
assert len(blocks[0].interfaces) == 1
assert isinstance(blocks[1], InterfaceDescription)
assert blocks[1].link_type == 0x04D8 # ???
assert blocks[1].snaplen == 0x7C
assert len(blocks[1].options) == 0
def test_sample_test004_ntar():
with open("test_data/test004.ntar", "rb") as fp:
scanner = FileScanner(fp)
blocks = list(scanner)
# Section header
assert len(blocks) == 1
assert isinstance(blocks[0], SectionHeader)
assert blocks[0].endianness == "<"
assert blocks[0].version == (1, 0)
assert blocks[0].length == -1
assert len(blocks[0].options) == 2
assert blocks[0].options["shb_os"] == "Windows XP\x00" # (why NULL?)
assert blocks[0].options["shb_userappl"] == "Test004.exe\x00"
assert len(blocks[0].interfaces) == 0
def test_sample_test005_ntar():
with open("test_data/test005.ntar", "rb") as fp:
scanner = FileScanner(fp)
blocks = list(scanner)
# Section header, interface description
assert len(blocks) == 2
assert isinstance(blocks[0], SectionHeader)
assert blocks[0].endianness == "<"
assert blocks[0].version == (1, 0)
assert blocks[0].length == -1
assert len(blocks[0].options) == 0
assert len(blocks[0].interfaces) == 1
assert isinstance(blocks[1], InterfaceDescription)
assert blocks[1].link_type == 0x04D8 # ???
assert blocks[1].snaplen == 0x7C
assert len(blocks[1].options) == 2
assert (
blocks[1].options.get_raw("if_speed") == b"\x00\xe4\x0b\x54\x02\x00\x00\x00"
) # noqa
assert blocks[1].options["if_speed"] == 0x00000002540BE400
assert blocks[1].options["if_speed"] == (10 ** 10) # 10Gbit
assert blocks[1].options["if_description"] == "Stupid ethernet interface\x00"
@pytest.mark.parametrize(
"filename",
[
pytest.param("test_data/test006.ntar", marks=pytest.mark.xfail),
"test_data/test006-fixed.ntar",
],
)
def test_sample_test006_ntar(filename):
# Note: See the comment below this function
# test006.ntar is reporting an incorrect size, which causes the
# test to fail. Is this the expected behavior?
with open(filename, "rb") as fp:
scanner = FileScanner(fp)
blocks = list(scanner)
# Section header, interface description, then what??
assert len(blocks) == 3
assert isinstance(blocks[0], SectionHeader)
assert blocks[0].endianness == "<"
assert blocks[0].version == (1, 0)
assert blocks[0].length == -1
assert len(blocks[0].options) == 0
assert len(blocks[0].interfaces) == 1
assert isinstance(blocks[1], InterfaceDescription)
assert blocks[1].link_type == 2
assert blocks[1].snaplen == 96
assert len(blocks[1].options) == 2
assert blocks[1].options["if_speed"] == (10 ** 8) # 100Mbit
assert blocks[1].options["if_description"] == "Stupid ethernet interface\x00"
assert isinstance(blocks[2], ObsoletePacket)
assert blocks[2].interface_id == 0
assert blocks[2].options["pack_flags"].inout == "NA"
assert blocks[2].options["pack_flags"].casttype == "NA"
assert blocks[2].options["pack_flags"].fcslen == 0
assert blocks[2].options["pack_flags"].reserved == 0
assert blocks[2].options["pack_flags"].err_16 is False
assert blocks[2].options["pack_flags"].err_17 is False
assert blocks[2].options["pack_flags"].err_18 is False
assert blocks[2].options["pack_flags"].err_19 is False
assert blocks[2].options["pack_flags"].err_20 is False
assert blocks[2].options["pack_flags"].err_21 is False
assert blocks[2].options["pack_flags"].err_22 is False
assert blocks[2].options["pack_flags"].err_23 is False
assert blocks[2].options["pack_flags"].err_crc is False
assert blocks[2].options["pack_flags"].err_long is False
assert blocks[2].options["pack_flags"].err_short is False
assert blocks[2].options["pack_flags"].err_frame_gap is False
assert blocks[2].options["pack_flags"].err_frame_align is False
assert blocks[2].options["pack_flags"].err_frame_delim is False
assert blocks[2].options["pack_flags"].err_preamble is False
assert blocks[2].options["pack_flags"].err_symbol is False
# ============================================================
# Dissection of test006.ntar
#
# PROBLEM: Total size of packet block is incorrectly reported
# to be one byte shorter than it actually is!
# ============================================================
# -------------------- Section header --------------------
# 00000000: 0a0d 0d0a Magic number
# 00000000: 1c00 0000 Block size (28)
# 00000000: 4d3c 2b1a Byte order (LE)
# 00000000: 0100 0000 Version (1, 0)
# 00000010: ffff ffff ffff ffff Section size (-1)
# (No options)
# 00000010: 1c00 0000 Block size (28)
# -------------------- Interface description --------------------
# 00000010: 0100 0000 Block Magic
# 00000020: 4400 0000 Block total length (68)
# 00000020: 0200 Link type (2)
# 00000020: 0000 Reserved (0)
# 00000020: 6000 0000 Snapshot length
# 00000020: 0300 1a00 Option 3 - 26 bytes
# 00000030: 5374 7570 6964 2065 7468 6572 6e65 7420 Stupid ethernet
# 00000040: 696e 7465 7266 6163 6500 0000 interface
# 00000040: 0800 0800 Option 8 - 8 bytes
# 00000050: 00e1 f505 0000 0000 (speed = 100Mbps)
# 00000050: 0000 0000 End of options block
# 00000050: 4400 0000 Block total length (68)
# -------------------- Packet (Obsolete) --------------------
# 00000060: 0200 0000 Block Magic
# 00000060: a700 0000 Block size (167(!??))
# 00000060: 0000 Interface id (0)
# 00000060: 0000 Drops count
# 00000060: 0000 0000 Timestamp (high)
# 00000070: 0000 0000 Timestamp (low)
# 00000070: 7b00 0000 Captured len (123) [pad 1]
# 00000070: e803 0000 Packet len (1000)
# 00000070: 6853 11f3 ....{.......hS.. [4]
# 00000080: 3b00 0000 978f 00f3 3b00 0000 0000 0000 ;.......;....... [20]
# 00000090: 0000 0000 0000 0000 0000 0000 0000 0000 ................ [36]
# 000000a0: 0000 0000 0100 0000 0000 0000 d0f1 ffbf ................ [52]
# 000000b0: 7f00 0000 d04f 11f3 3b00 0000 6005 00f3 .....O..;...`... [68]
# 000000c0: 3b00 0000 fc06 00f3 3b00 0000 6002 00f3 ;.......;...`... [84]
# 000000d0: 3b00 0000 5806 4000 0000 0000 6853 11f3 ;[email protected].. [100]
# 000000e0: 3b00 0000 6853 11f3 0200 0000 0000 0000 ;...hS.......... [116]
# 000000f0: 0000 0000 0000 0000 ................ [124]
# 000000f0: 0200 0400 Option 2 - 4 bytes
# 000000f0: 0000 0000 0x00000000
# 00000100: 0000 0000 Options end marker
# 00000100: a700 0000 Block size (167)
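# Side note (not one of the shipped sample tests): the section-header rows
# annotated above can be cross-checked with nothing but the stdlib. The raw
# bytes below are copied straight from the dump; field names follow the
# pcapng spec.
def _decode_section_header_sketch():
    import struct
    raw = b"\x0a\x0d\x0d\x0a\x1c\x00\x00\x00\x4d\x3c\x2b\x1a\x01\x00\x00\x00"
    magic, block_len = struct.unpack("<II", raw[:8])
    byte_order, major, minor = struct.unpack("<IHH", raw[8:16])
    assert magic == 0x0a0d0d0a       # block magic: "\n\r\r\n"
    assert block_len == 28           # matches "Block size (28)" above
    assert byte_order == 0x1a2b3c4d  # little-endian byte-order marker
    assert (major, minor) == (1, 0)  # matches "Version (1, 0)" above
# The remaining sample files are exercised by pure smoke tests: scanning
# each capture must simply complete without raising.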
def test_sample_test007_ntar():
with open("test_data/test007.ntar", "rb") as fp:
scanner = FileScanner(fp)
for entry in scanner:
pass
def test_sample_test008_ntar():
with open("test_data/test008.ntar", "rb") as fp:
scanner = FileScanner(fp)
for entry in scanner:
pass
def test_sample_test009_ntar():
with open("test_data/test009.ntar", "rb") as fp:
scanner = FileScanner(fp)
for entry in scanner:
pass
def test_sample_test010_ntar():
with open("test_data/test010.ntar", "rb") as fp:
scanner = FileScanner(fp)
for entry in scanner:
pass
| apache-2.0 | 7,110,236,058,930,983,000 | 37.78327 | 88 | 0.549902 | false |
lunixbochs/actualvim | lib/neovim/api/buffer.py | 1 | 6063 | """API for working with a Nvim Buffer."""
from .common import Remote
from ..compat import IS_PYTHON3
__all__ = ('Buffer',)
if IS_PYTHON3:
basestring = str
def adjust_index(idx, default=None):
"""Convert from python indexing convention to nvim indexing convention."""
if idx is None:
return default
elif idx < 0:
return idx - 1
else:
return idx
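# Worked examples of the convention this implements:
#   adjust_index(0)        -> 0    non-negative indexes pass through
#   adjust_index(-1)       -> -2   python's "last line" shifts by one,
#                                  since nvim treats -1 as one-past-the-end
#   adjust_index(None, -1) -> -1   a missing bound falls back to the default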
class Buffer(Remote):
"""A remote Nvim buffer."""
_api_prefix = "nvim_buf_"
def __len__(self):
"""Return the number of lines contained in a Buffer."""
return self.request('buffer_line_count')
def _get_lines(self, start, end, strict):
lines = self.request_raw('nvim_buf_get_lines', start, end, strict)
return [line.decode('utf8') for line in lines]
def _set_lines(self, start, end, strict, lines):
lines = [line.encode('utf8') for line in lines]
return self.request_raw('nvim_buf_set_lines', start, end, strict, lines)
def __getitem__(self, idx):
"""Get a buffer line or slice by integer index.
Indexes may be negative to specify positions from the end of the
buffer. For example, -1 is the last line, -2 is the line before that
and so on.
When retrieving slices, omiting indexes(eg: `buffer[:]`) will bring
the whole buffer.
"""
if not isinstance(idx, slice):
i = adjust_index(idx)
return self._get_lines(i, i + 1, True)[0]
start = adjust_index(idx.start, 0)
end = adjust_index(idx.stop, -1)
return self._get_lines(start, end, False)
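# Usage sketch (assumes "buf" is a Buffer attached to a live session):
#   buf[0]    -> first line as a str
#   buf[-1]   -> last line
#   buf[1:3]  -> the second and third lines as a list
#   buf[:]    -> every line in the buffer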
def __setitem__(self, idx, item):
"""Replace a buffer line or slice by integer index.
Like with `__getitem__`, indexes may be negative.
When replacing slices, omiting indexes(eg: `buffer[:]`) will replace
the whole buffer.
"""
if not isinstance(idx, slice):
i = adjust_index(idx)
lines = [item] if item is not None else []
return self._set_lines(i, i + 1, True, lines)
lines = item if item is not None else []
start = adjust_index(idx.start, 0)
end = adjust_index(idx.stop, -1)
return self._set_lines(start, end, False, lines)
def __iter__(self):
"""Iterate lines of a buffer.
This will retrieve all lines locally before iteration starts. This
approach is used because for most cases, the gain is much greater by
minimizing the number of API calls by transfering all data needed to
work.
"""
lines = self[:]
for line in lines:
yield line
def __delitem__(self, idx):
"""Delete line or slice of lines from the buffer.
This is the same as __setitem__(idx, [])
"""
self.__setitem__(idx, None)
def append(self, lines, index=-1):
"""Append a string or list of lines to the buffer."""
if isinstance(lines, (basestring, bytes)):
lines = [lines]
return self._set_lines(index, index, True, lines)
def mark(self, name):
"""Return (row, col) tuple for a named mark."""
return self.request('nvim_buf_get_mark', name)
def range(self, start, end):
"""Return a `Range` object, which represents part of the Buffer."""
return Range(self, start, end)
def add_highlight(self, hl_group, line, col_start=0,
col_end=-1, src_id=-1, async=None):
"""Add a highlight to the buffer."""
if async is None:
async = (src_id != 0)
return self.request('nvim_buf_add_highlight', src_id, hl_group,
line, col_start, col_end, async=async)
def clear_highlight(self, src_id, line_start=0, line_end=-1, async=True):
"""Clear highlights from the buffer."""
self.request('nvim_buf_clear_highlight', src_id,
line_start, line_end, async=async)
@property
def name(self):
"""Get the buffer name."""
return self.request('nvim_buf_get_name')
@name.setter
def name(self, value):
"""Set the buffer name. BufFilePre/BufFilePost are triggered."""
return self.request('nvim_buf_set_name', value)
@property
def valid(self):
"""Return True if the buffer still exists."""
return self.request('nvim_buf_is_valid')
@property
def number(self):
"""Get the buffer number."""
return self.handle
class Range(object):
def __init__(self, buffer, start, end):
self._buffer = buffer
self.start = start - 1
self.end = end - 1
def __len__(self):
return self.end - self.start + 1
def __getitem__(self, idx):
if not isinstance(idx, slice):
return self._buffer[self._normalize_index(idx)]
start = self._normalize_index(idx.start)
end = self._normalize_index(idx.stop)
if start is None:
start = self.start
if end is None:
end = self.end + 1
return self._buffer[start:end]
def __setitem__(self, idx, lines):
if not isinstance(idx, slice):
self._buffer[self._normalize_index(idx)] = lines
return
start = self._normalize_index(idx.start)
end = self._normalize_index(idx.stop)
if start is None:
start = self.start
if end is None:
end = self.end + 1
self._buffer[start:end] = lines
def __iter__(self):
for i in range(self.start, self.end + 1):
yield self._buffer[i]
def append(self, lines, i=None):
i = self._normalize_index(i)
if i is None:
i = self.end + 1
self._buffer.append(lines, i)
def _normalize_index(self, index):
if index is None:
return None
if index < 0:
index = self.end
else:
index += self.start
if index > self.end:
index = self.end
return index
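# Range usage sketch (hypothetical buffer "buf"; start/end are the one-based,
# inclusive line numbers the Vim API uses):
#   r = buf.range(1, 3)   # wraps buffer lines 1..3
#   len(r)                # -> 3
#   r[0]                  # -> first line of the range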
| mit | 3,638,414,735,793,391,600 | 30.414508 | 80 | 0.568036 | false |
mikehankey/fireball_camera | ffmpeg_record.py | 1 | 2859 | #!/usr/bin/python3
import glob
import sys
import subprocess
import os
import time
video_dir = "/mnt/ams2"
def check_running(cam_num, type):
if type == "HD":
cmd = "ps -aux |grep \"ffmpeg\" | grep \"HD\" | grep \"cam" + cam_num + "\" | grep -v grep | wc -l"
else:
cmd = "ps -aux |grep \"ffmpeg\" | grep \"SD\" | grep \"cam" + cam_num + "\" | grep -v grep | wc -l"
print(cmd)
output = subprocess.check_output(cmd, shell=True).decode("utf-8")
output = int(output.replace("\n", ""))
return(int(output))
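# For cam_num "1" and type "HD" the assembled pipeline reads:
#   ps -aux |grep "ffmpeg" | grep "HD" | grep "cam1" | grep -v grep | wc -l
# i.e. the return value is simply how many matching ffmpeg processes exist.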
def start_capture(cam_num):
running = check_running(cam_num, "HD")
if running == 0:
cmd = "/usr/bin/ffmpeg -i rtsp://192.168.76.7" + cam_num + "/av0_0 -c copy -map 0 -f segment -strftime 1 -segment_time 60 -segment_format mp4 \"" + video_dir + "/HD/" + "%Y-%m-%d_%H-%M-%S-cam" + cam_num + ".mp4\" 2>&1 > /dev/null & "
print(cmd)
os.system(cmd)
time.sleep(2)
else:
print ("ffmpeg already running for cam:", cam_num)
running = check_running(cam_num, "SD")
if running == 0:
cmd = "/usr/bin/ffmpeg -i rtsp://192.168.76.7" + cam_num + "/av0_1 -c copy -map 0 -f segment -strftime 1 -segment_time 60 -segment_format mp4 \"" + video_dir + "/SD/" + "%Y-%m-%d_%H-%M-%S-cam" + cam_num + ".mp4\" 2>&1 > /dev/null & "
print(cmd)
os.system(cmd)
time.sleep(2)
else:
print ("ffmpeg already running for cam:", cam_num)
def stop_capture(cam_num):
#print ("Stopping capture for ", cam_num)
# NB: this kills every running ffmpeg process, regardless of cam_num.
cmd = "kill -9 `ps -aux | grep ffmpeg |grep -v grep| awk '{print $2}'`"
output = subprocess.check_output(cmd, shell=True).decode("utf-8")
print (output)
def purge(cam_num):
cur_time = int(time.time())
#cmd = "rm " + cam_num + "/*"
#print (cmd)
#os.system(cmd)
for filename in (glob.glob(video_dir + '/' + cam_num + '/*.mp4')):
st = os.stat(filename)
mtime = st.st_mtime
tdiff = cur_time - mtime
tdiff = tdiff / 60 / 60 / 24
if tdiff >= .8:
cmd = "rm " + filename
print(cmd)
os.system(cmd)
#file_list.append(filename)
cmd = None
cam_num = None
try:
cmd = sys.argv[1]
cam_num = sys.argv[2]
except IndexError:
# Missing arguments: leave cmd/cam_num unset and flag it.
do_all = 1
if (cmd == "stop"):
stop_capture("1")
if (cmd == "start"):
start_capture(cam_num)
if (cmd == "start_all"):
start_capture("1")
start_capture("2")
start_capture("3")
start_capture("4")
start_capture("5")
start_capture("6")
if (cmd == "purge"):
purge(cam_num)
if (cmd == "check_running"):
running = check_running(cam_num, "HD")
print (running)
running = check_running(cam_num, "SD")
print (running)
if (cmd == "purge_all"):
purge("1")
purge("2")
purge("3")
purge("4")
purge("5")
purge("6")
#ffmpeg -i rtsp://192.168.76.71/av0_1 -c copy -map 0 -f segment -segment_time 60 -segment_format mp4 "1/capture-1-%03d.mp4" &
| gpl-3.0 | 4,738,862,852,939,186,000 | 25.971698 | 239 | 0.564533 | false |
release-engineering/fedmsg_meta_umb | fedmsg_meta_umb/rpmdiff.py | 1 | 2379 | # Copyright (C) 2017 Red Hat, Inc.
#
# fedmsg_meta_umb is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# fedmsg_meta_umb is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with fedmsg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Ralph Bean <[email protected]>
from fedmsg.meta.base import BaseProcessor
class RPMDiffProcessor(BaseProcessor):
topic_prefix_re = r'/topic/VirtualTopic\.eng'
__name__ = 'rpmdiff'
__description__ = 'the rpmdiff analysis system'
__link__ = 'https://rpmdiff.engineering.redhat.com/'
__docs__ = 'https://docs.engineering.redhat.com/display/EXD/rpmdiff'
__obj__ = 'RPMDiff Analysis System'
__icon__ = '_static/img/icons/erratatool50.png'
def title(self, msg, **config):
return msg['topic'].split('.', 2)[-1]
def subtitle(self, msg, **config):
action = self.title(msg, **config).split('.')[-1]
if msg['msg']['type'] == 'COMPARISON':
kwargs = dict(
action=action,
package=msg['msg']['package_name'],
baseline='-'.join(msg['msg']['baseline'].rsplit('-', 2)[1:]),
target='-'.join(msg['msg']['nvr'].rsplit('-', 2)[1:]),
)
template = ('rpmdiff comparison of {package} is {action} '
'({target} against {baseline})')
return template.format(**kwargs)
elif msg['msg']['type'] == 'ANALYSIS':
kwargs = dict(action=action, nvr=msg['msg']['nvr'])
template = 'rpmdiff analysis of {nvr} is {action}'
return template.format(**kwargs)
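# The rsplit trick above turns an NVR like "bash-4.2.46-30.el7" into the
# version-release string (a worked example, not a project-specific value):
#   '-'.join("bash-4.2.46-30.el7".rsplit('-', 2)[1:]) == "4.2.46-30.el7"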
def packages(self, msg, **config):
return set([msg['msg']['package_name']])
def link(self, msg, **config):
template = 'https://rpmdiff.engineering.redhat.com/run/{run_id}/'
return template.format(**msg['msg'])
| lgpl-2.1 | 5,623,728,822,967,871,000 | 40.736842 | 78 | 0.623792 | false |
jesuscript/topo-mpi | param/external.py | 1 | 68357 | """
External code required for param/tkinter interface.
* odict: an ordered dictionary
* tilewrapper: a wrapper for Tile/ttk widgets
Note that an ordered dictionary and a wrapper for ttk widgets are both
available in Python 2.7.
"""
from __future__ import generators
# odict.py
# An Ordered Dictionary object
# Copyright (C) 2005 Nicola Larosa, Michael Foord
# E-mail: nico AT tekNico DOT net, fuzzyman AT voidspace DOT org DOT uk
# This software is licensed under the terms of the BSD license.
# http://www.voidspace.org.uk/python/license.shtml
# Basically you're free to copy, modify, distribute and relicense it,
# So long as you keep a copy of the license with it.
# Documentation at http://www.voidspace.org.uk/python/odict.html
# For information about bugfixes, updates and support, please join the
# Pythonutils mailing list:
# http://groups.google.com/group/pythonutils/
# Comments, suggestions and bug reports welcome.
"""A dict that keeps keys in insertion order"""
__author__ = ('Nicola Larosa <[email protected]>,'
' Michael Foord <fuzzyman AT voidspace DOT org DOT uk>')
__docformat__ = "restructuredtext en"
__revision__ = '$Id$'
__version__ = '0.2.2'
__all__ = ['OrderedDict', 'SequenceOrderedDict']
import sys
INTP_VER = sys.version_info[:2]
if INTP_VER < (2, 2):
raise RuntimeError("Python v.2.2 or later required")
import types, warnings
class OrderedDict(dict):
"""
A class of dictionary that keeps the insertion order of keys.
All appropriate methods return keys, items, or values in an ordered way.
All normal dictionary methods are available. Update and comparison is
restricted to other OrderedDict objects.
Various sequence methods are available, including the ability to explicitly
mutate the key ordering.
__contains__ tests:
>>> d = OrderedDict(((1, 3),))
>>> 1 in d
1
>>> 4 in d
0
__getitem__ tests:
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[2]
1
>>> OrderedDict(((1, 3), (3, 2), (2, 1)))[4]
Traceback (most recent call last):
KeyError: 4
__len__ tests:
>>> len(OrderedDict())
0
>>> len(OrderedDict(((1, 3), (3, 2), (2, 1))))
3
get tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.get(1)
3
>>> d.get(4) is None
1
>>> d.get(4, 5)
5
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
has_key tests:
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.has_key(1)
1
>>> d.has_key(4)
0
"""
def __init__(self, init_val=(), strict=False):
"""
Create a new ordered dictionary. Cannot init from a normal dict,
nor from kwargs, since items order is undefined in those cases.
If the ``strict`` keyword argument is ``True`` (``False`` is the
default) then when doing slice assignment - the ``OrderedDict`` you are
assigning from *must not* contain any keys in the remaining dict.
>>> OrderedDict()
OrderedDict([])
>>> OrderedDict({1: 1})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> OrderedDict({1: 1}.items())
OrderedDict([(1, 1)])
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1)])
>>> OrderedDict(d)
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
self.strict = strict
dict.__init__(self)
if isinstance(init_val, OrderedDict):
self._sequence = init_val.keys()
dict.update(self, init_val)
elif isinstance(init_val, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
self._sequence = []
self.update(init_val)
### Special methods ###
def __delitem__(self, key):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> del d[3]
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> del d[3]
Traceback (most recent call last):
KeyError: 3
>>> d[3] = 2
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> del d[0:1]
>>> d
OrderedDict([(2, 1), (3, 2)])
"""
if isinstance(key, types.SliceType):
# FIXME: efficiency?
keys = self._sequence[key]
for entry in keys:
dict.__delitem__(self, entry)
del self._sequence[key]
else:
# do the dict.__delitem__ *first* as it raises
# the more appropriate error
dict.__delitem__(self, key)
self._sequence.remove(key)
def __eq__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d == OrderedDict(d)
True
>>> d == OrderedDict(((1, 3), (2, 1), (3, 2)))
False
>>> d == OrderedDict(((1, 0), (3, 2), (2, 1)))
False
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d == dict(d)
False
>>> d == False
False
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() == other.items())
else:
return False
def __lt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> c < d
True
>>> d < c
False
>>> d < dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() < other.items())
def __le__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c <= d
True
>>> d <= c
False
>>> d <= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> d <= e
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() <= other.items())
def __ne__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d != OrderedDict(d)
False
>>> d != OrderedDict(((1, 3), (2, 1), (3, 2)))
True
>>> d != OrderedDict(((1, 0), (3, 2), (2, 1)))
True
>>> d == OrderedDict(((0, 3), (3, 2), (2, 1)))
False
>>> d != dict(d)
True
>>> d != False
True
"""
if isinstance(other, OrderedDict):
# FIXME: efficiency?
# Generate both item lists for each compare
return not (self.items() == other.items())
else:
return True
def __gt__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> d > c
True
>>> c > d
False
>>> d > dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() > other.items())
def __ge__(self, other):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> c = OrderedDict(((0, 3), (3, 2), (2, 1)))
>>> e = OrderedDict(d)
>>> c >= d
False
>>> d >= c
True
>>> d >= dict(c)
Traceback (most recent call last):
TypeError: Can only compare with other OrderedDicts
>>> e >= d
True
"""
if not isinstance(other, OrderedDict):
raise TypeError('Can only compare with other OrderedDicts')
# FIXME: efficiency?
# Generate both item lists for each compare
return (self.items() >= other.items())
def __repr__(self):
"""
Used for __repr__ and __str__
>>> r1 = repr(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
>>> r1
"OrderedDict([('a', 'b'), ('c', 'd'), ('e', 'f')])"
>>> r2 = repr(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
>>> r2
"OrderedDict([('a', 'b'), ('e', 'f'), ('c', 'd')])"
>>> r1 == str(OrderedDict((('a', 'b'), ('c', 'd'), ('e', 'f'))))
True
>>> r2 == str(OrderedDict((('a', 'b'), ('e', 'f'), ('c', 'd'))))
True
"""
return '%s([%s])' % (self.__class__.__name__, ', '.join(
['(%r, %r)' % (key, self[key]) for key in self._sequence]))
def __setitem__(self, key, val):
"""
Allows slice assignment, so long as the slice is an OrderedDict
>>> d = OrderedDict()
>>> d['a'] = 'b'
>>> d['b'] = 'a'
>>> d[3] = 12
>>> d
OrderedDict([('a', 'b'), ('b', 'a'), (3, 12)])
>>> d[:] = OrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
OrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d[::2] = OrderedDict(((7, 8), (9, 10)))
>>> d
OrderedDict([(7, 8), (2, 3), (9, 10)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)))
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> d = OrderedDict(((0, 1), (1, 2), (2, 3), (3, 4)), strict=True)
>>> d[1:3] = OrderedDict(((1, 2), (5, 6), (7, 8)))
>>> d
OrderedDict([(0, 1), (1, 2), (5, 6), (7, 8), (3, 4)])
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)), strict=True)
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)])
Traceback (most recent call last):
ValueError: slice assignment must be from unique keys
>>> a = OrderedDict(((0, 1), (1, 2), (2, 3)))
>>> a[3] = 4
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[:2] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a[::-1] = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> a
OrderedDict([(3, 4), (2, 3), (1, 2), (0, 1)])
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = 3
Traceback (most recent call last):
TypeError: slice assignment requires an OrderedDict
>>> d = OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)])
>>> d[:1] = OrderedDict([(9, 8)])
>>> d
OrderedDict([(9, 8), (1, 2), (2, 3), (3, 4)])
"""
if isinstance(key, types.SliceType):
if not isinstance(val, OrderedDict):
# FIXME: allow a list of tuples?
raise TypeError('slice assignment requires an OrderedDict')
keys = self._sequence[key]
# NOTE: Could use ``range(*key.indices(len(self._sequence)))``
indexes = range(len(self._sequence))[key]
if key.step is None:
# NOTE: new slice may not be the same size as the one being
# overwritten !
# NOTE: What is the algorithm for an impossible slice?
# e.g. d[5:3]
pos = key.start or 0
del self[key]
newkeys = val.keys()
for k in newkeys:
if k in self:
if self.strict:
raise ValueError('slice assignment must be from '
'unique keys')
else:
# NOTE: This removes duplicate keys *first*
# so start position might have changed?
del self[k]
self._sequence = (self._sequence[:pos] + newkeys +
self._sequence[pos:])
dict.update(self, val)
else:
# extended slice - length of new slice must be the same
# as the one being replaced
if len(keys) != len(val):
raise ValueError('attempt to assign sequence of size %s '
'to extended slice of size %s' % (len(val), len(keys)))
# FIXME: efficiency?
del self[key]
item_list = zip(indexes, val.items())
# smallest indexes first - higher indexes not guaranteed to
# exist
item_list.sort()
for pos, (newkey, newval) in item_list:
if self.strict and newkey in self:
raise ValueError('slice assignment must be from unique'
' keys')
self.insert(pos, newkey, newval)
else:
if key not in self:
self._sequence.append(key)
dict.__setitem__(self, key, val)
def __getitem__(self, key):
"""
Allows slicing. Returns an OrderedDict if you slice.
>>> b = OrderedDict([(7, 0), (6, 1), (5, 2), (4, 3), (3, 4), (2, 5), (1, 6)])
>>> b[::-1]
OrderedDict([(1, 6), (2, 5), (3, 4), (4, 3), (5, 2), (6, 1), (7, 0)])
>>> b[2:5]
OrderedDict([(5, 2), (4, 3), (3, 4)])
>>> type(b[2:4])
<class '__main__.OrderedDict'>
"""
if isinstance(key, types.SliceType):
# FIXME: does this raise the error we want?
keys = self._sequence[key]
# FIXME: efficiency?
return OrderedDict([(entry, self[entry]) for entry in keys])
else:
return dict.__getitem__(self, key)
__str__ = __repr__
def __setattr__(self, name, value):
"""
Implemented so that accesses to ``sequence`` raise a warning and are
diverted to the new ``setkeys`` method.
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: doesn't return anything
self.setkeys(value)
else:
# FIXME: do we want to allow arbitrary setting of attributes?
# Or do we want to manage it?
object.__setattr__(self, name, value)
def __getattr__(self, name):
"""
Implemented so that access to ``sequence`` raises a warning.
>>> d = OrderedDict()
>>> d.sequence
[]
"""
if name == 'sequence':
warnings.warn('Use of the sequence attribute is deprecated.'
' Use the keys method instead.', DeprecationWarning)
# NOTE: Still (currently) returns a direct reference. Need to
# because code that uses sequence will expect to be able to
# mutate it in place.
return self._sequence
else:
# raise the appropriate error
raise AttributeError("OrderedDict has no '%s' attribute" % name)
def __deepcopy__(self, memo):
"""
To allow deepcopy to work with OrderedDict.
>>> from copy import deepcopy
>>> a = OrderedDict([(1, 1), (2, 2), (3, 3)])
>>> a['test'] = {}
>>> b = deepcopy(a)
>>> b == a
True
>>> b is a
False
>>> a['test'] is b['test']
False
"""
from copy import deepcopy
return self.__class__(deepcopy(self.items(), memo), self.strict)
### Read-only methods ###
def copy(self):
"""
>>> OrderedDict(((1, 3), (3, 2), (2, 1))).copy()
OrderedDict([(1, 3), (3, 2), (2, 1)])
"""
return OrderedDict(self)
def items(self):
"""
``items`` returns a list of tuples representing all the
``(key, value)`` pairs in the dictionary.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.items()
[(1, 3), (3, 2), (2, 1)]
>>> d.clear()
>>> d.items()
[]
"""
return zip(self._sequence, self.values())
def keys(self):
"""
Return a list of keys in the ``OrderedDict``.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
"""
return self._sequence[:]
def values(self, values=None):
"""
Return a list of all the values in the OrderedDict.
Optionally you can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.values()
[3, 2, 1]
"""
return [self[key] for key in self._sequence]
def iteritems(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iteritems()
>>> ii.next()
(1, 3)
>>> ii.next()
(3, 2)
>>> ii.next()
(2, 1)
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
key = keys.next()
yield (key, self[key])
return make_iter()
def iterkeys(self):
"""
>>> ii = OrderedDict(((1, 3), (3, 2), (2, 1))).iterkeys()
>>> ii.next()
1
>>> ii.next()
3
>>> ii.next()
2
>>> ii.next()
Traceback (most recent call last):
StopIteration
"""
return iter(self._sequence)
__iter__ = iterkeys
def itervalues(self):
"""
>>> iv = OrderedDict(((1, 3), (3, 2), (2, 1))).itervalues()
>>> iv.next()
3
>>> iv.next()
2
>>> iv.next()
1
>>> iv.next()
Traceback (most recent call last):
StopIteration
"""
def make_iter(self=self):
keys = self.iterkeys()
while True:
yield self[keys.next()]
return make_iter()
### Read-write methods ###
def clear(self):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.clear()
>>> d
OrderedDict([])
"""
dict.clear(self)
self._sequence = []
def pop(self, key, *args):
"""
No dict.pop in Python 2.2, gotta reimplement it
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.pop(3)
2
>>> d
OrderedDict([(1, 3), (2, 1)])
>>> d.pop(4)
Traceback (most recent call last):
KeyError: 4
>>> d.pop(4, 0)
0
>>> d.pop(4, 0, 1)
Traceback (most recent call last):
TypeError: pop expected at most 2 arguments, got 3
"""
if len(args) > 1:
raise TypeError, ('pop expected at most 2 arguments, got %s' %
(len(args) + 1))
if key in self:
val = self[key]
del self[key]
else:
try:
val = args[0]
except IndexError:
raise KeyError(key)
return val
def popitem(self, i=-1):
"""
Delete and return an item specified by index, not a random one as in
dict. The index is -1 by default (the last item).
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.popitem()
(2, 1)
>>> d
OrderedDict([(1, 3), (3, 2)])
>>> d.popitem(0)
(1, 3)
>>> OrderedDict().popitem()
Traceback (most recent call last):
KeyError: 'popitem(): dictionary is empty'
>>> d.popitem(2)
Traceback (most recent call last):
IndexError: popitem(): index 2 not valid
"""
if not self._sequence:
raise KeyError('popitem(): dictionary is empty')
try:
key = self._sequence[i]
except IndexError:
raise IndexError('popitem(): index %s not valid' % i)
return (key, self.pop(key))
def setdefault(self, key, defval = None):
"""
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setdefault(1)
3
>>> d.setdefault(4) is None
True
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None)])
>>> d.setdefault(5, 0)
0
>>> d
OrderedDict([(1, 3), (3, 2), (2, 1), (4, None), (5, 0)])
"""
if key in self:
return self[key]
else:
self[key] = defval
return defval
def update(self, from_od):
"""
Update from another OrderedDict or sequence of (key, value) pairs
>>> d = OrderedDict(((1, 0), (0, 1)))
>>> d.update(OrderedDict(((1, 3), (3, 2), (2, 1))))
>>> d
OrderedDict([(1, 3), (0, 1), (3, 2), (2, 1)])
>>> d.update({4: 4})
Traceback (most recent call last):
TypeError: undefined order, cannot get items from dict
>>> d.update((4, 4))
Traceback (most recent call last):
TypeError: cannot convert dictionary update sequence element "4" to a 2-item sequence
"""
if isinstance(from_od, OrderedDict):
for key, val in from_od.items():
self[key] = val
elif isinstance(from_od, dict):
# we lose compatibility with other ordered dict types this way
raise TypeError('undefined order, cannot get items from dict')
else:
# FIXME: efficiency?
# sequence of 2-item sequences, or error
for item in from_od:
try:
key, val = item
except TypeError:
raise TypeError('cannot convert dictionary update'
' sequence element "%s" to a 2-item sequence' % item)
self[key] = val
def rename(self, old_key, new_key):
"""
Rename the key for a given value, without modifying sequence order.
For the case where new_key already exists this raise an exception,
since if new_key exists, it is ambiguous as to what happens to the
associated values, and the position of new_key in the sequence.
>>> od = OrderedDict()
>>> od['a'] = 1
>>> od['b'] = 2
>>> od.items()
[('a', 1), ('b', 2)]
>>> od.rename('b', 'c')
>>> od.items()
[('a', 1), ('c', 2)]
>>> od.rename('c', 'a')
Traceback (most recent call last):
ValueError: New key already exists: 'a'
>>> od.rename('d', 'b')
Traceback (most recent call last):
KeyError: 'd'
"""
if new_key == old_key:
# no-op
return
if new_key in self:
raise ValueError("New key already exists: %r" % new_key)
# rename sequence entry
value = self[old_key]
old_idx = self._sequence.index(old_key)
self._sequence[old_idx] = new_key
# rename internal dict entry
dict.__delitem__(self, old_key)
dict.__setitem__(self, new_key, value)
def setitems(self, items):
"""
This method allows you to set the items in the dict.
It takes a list of tuples - of the same sort returned by the ``items``
method.
>>> d = OrderedDict()
>>> d.setitems(((3, 1), (2, 3), (1, 2)))
>>> d
OrderedDict([(3, 1), (2, 3), (1, 2)])
"""
self.clear()
# FIXME: this allows you to pass in an OrderedDict as well :-)
self.update(items)
def setkeys(self, keys):
"""
``setkeys`` allows you to pass in a new list of keys which will
replace the current set. This must contain the same set of keys, but
need not be in the same order.
If you pass in new keys that don't match, a ``KeyError`` will be
raised.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.keys()
[1, 3, 2]
>>> d.setkeys((1, 2, 3))
>>> d
OrderedDict([(1, 3), (2, 1), (3, 2)])
>>> d.setkeys(['a', 'b', 'c'])
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
"""
# FIXME: Efficiency? (use set for Python 2.4 :-)
# NOTE: list(keys) rather than keys[:] because keys[:] returns
# a tuple, if keys is a tuple.
kcopy = list(keys)
kcopy.sort()
self._sequence.sort()
if kcopy != self._sequence:
raise KeyError('Keylist is not the same as current keylist.')
# NOTE: This makes the _sequence attribute a new object, instead
# of changing it in place.
# FIXME: efficiency?
self._sequence = list(keys)
def setvalues(self, values):
"""
You can pass in a list of values, which will replace the
current list. The value list must be the same len as the OrderedDict.
(Or a ``ValueError`` is raised.)
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.setvalues((1, 2, 3))
>>> d
OrderedDict([(1, 1), (3, 2), (2, 3)])
>>> d.setvalues([6])
Traceback (most recent call last):
ValueError: Value list is not the same length as the OrderedDict.
"""
if len(values) != len(self):
# FIXME: correct error to raise?
raise ValueError('Value list is not the same length as the '
'OrderedDict.')
self.update(zip(self, values))
### Sequence Methods ###
def index(self, key):
"""
Return the position of the specified key in the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.index(3)
1
>>> d.index(4)
Traceback (most recent call last):
ValueError: list.index(x): x not in list
"""
return self._sequence.index(key)
def insert(self, index, key, value):
"""
Takes ``index``, ``key``, and ``value`` as arguments.
Sets ``key`` to ``value``, so that ``key`` is at position ``index`` in
the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.insert(0, 4, 0)
>>> d
OrderedDict([(4, 0), (1, 3), (3, 2), (2, 1)])
>>> d.insert(0, 2, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2)])
>>> d.insert(8, 8, 1)
>>> d
OrderedDict([(2, 1), (4, 0), (1, 3), (3, 2), (8, 1)])
"""
if key in self:
# FIXME: efficiency?
del self[key]
self._sequence.insert(index, key)
dict.__setitem__(self, key, value)
def reverse(self):
"""
Reverse the order of the OrderedDict.
>>> d = OrderedDict(((1, 3), (3, 2), (2, 1)))
>>> d.reverse()
>>> d
OrderedDict([(2, 1), (3, 2), (1, 3)])
"""
self._sequence.reverse()
def sort(self, *args, **kwargs):
"""
Sort the key order in the OrderedDict.
This method takes the same arguments as the ``list.sort`` method on
your version of Python.
>>> d = OrderedDict(((4, 1), (2, 2), (3, 3), (1, 4)))
>>> d.sort()
>>> d
OrderedDict([(1, 4), (2, 2), (3, 3), (4, 1)])
"""
self._sequence.sort(*args, **kwargs)
class Keys(object):
# FIXME: should this object be a subclass of list?
"""
Custom object for accessing the keys of an OrderedDict.
Can be called like the normal ``OrderedDict.keys`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the keys method."""
return self._main._keys()
def __getitem__(self, index):
"""Fetch the key at position i."""
# NOTE: this automatically supports slicing :-)
return self._main._sequence[index]
def __setitem__(self, index, name):
"""
You cannot assign to keys, but you can do slice assignment to re-order
them.
You can only do slice assignment if the new set of keys is a reordering
of the original set.
"""
if isinstance(index, types.SliceType):
# FIXME: efficiency?
# check length is the same
indexes = range(len(self._main._sequence))[index]
if len(indexes) != len(name):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(name), len(indexes)))
# check they are the same keys
# FIXME: Use set
old_keys = self._main._sequence[index]
new_keys = list(name)
old_keys.sort()
new_keys.sort()
if old_keys != new_keys:
raise KeyError('Keylist is not the same as current keylist.')
orig_vals = [self._main[k] for k in name]
del self._main[index]
vals = zip(indexes, name, orig_vals)
vals.sort()
for i, k, v in vals:
if self._main.strict and k in self._main:
raise ValueError('slice assignment must be from '
'unique keys')
self._main.insert(i, k, v)
else:
raise ValueError('Cannot assign to keys')
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main._sequence)
# FIXME: do we need to check if we are comparing with another ``Keys``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main._sequence < other
def __le__(self, other): return self._main._sequence <= other
def __eq__(self, other): return self._main._sequence == other
def __ne__(self, other): return self._main._sequence != other
def __gt__(self, other): return self._main._sequence > other
def __ge__(self, other): return self._main._sequence >= other
# FIXME: do we need __cmp__ as well as rich comparisons?
def __cmp__(self, other): return cmp(self._main._sequence, other)
def __contains__(self, item): return item in self._main._sequence
def __len__(self): return len(self._main._sequence)
def __iter__(self): return self._main.iterkeys()
def count(self, item): return self._main._sequence.count(item)
def index(self, item, *args): return self._main._sequence.index(item, *args)
def reverse(self): self._main._sequence.reverse()
def sort(self, *args, **kwds): self._main._sequence.sort(*args, **kwds)
def __mul__(self, n): return self._main._sequence*n
__rmul__ = __mul__
def __add__(self, other): return self._main._sequence + other
def __radd__(self, other): return other + self._main._sequence
## following methods not implemented for keys ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from keys')
def __iadd__(self, other): raise TypeError('Can\'t add in place to keys')
def __imul__(self, n): raise TypeError('Can\'t multiply keys in place')
def append(self, item): raise TypeError('Can\'t append items to keys')
def insert(self, i, item): raise TypeError('Can\'t insert items into keys')
def pop(self, i=-1): raise TypeError('Can\'t pop items from keys')
def remove(self, item): raise TypeError('Can\'t remove items from keys')
def extend(self, other): raise TypeError('Can\'t extend keys')
class Items(object):
"""
Custom object for accessing the items of an OrderedDict.
Can be called like the normal ``OrderedDict.items`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the items method."""
return self._main._items()
def __getitem__(self, index):
"""Fetch the item at position i."""
if isinstance(index, types.SliceType):
# fetching a slice returns an OrderedDict
return self._main[index].items()
key = self._main._sequence[index]
return (key, self._main[key])
def __setitem__(self, index, item):
"""Set item at position i to item."""
if isinstance(index, types.SliceType):
# NOTE: item must be an iterable (list of tuples)
self._main[index] = OrderedDict(item)
else:
# FIXME: Does this raise a sensible error?
orig = self._main.keys[index]
key, value = item
if self._main.strict and key in self._main and (key != orig):
raise ValueError('slice assignment must be from '
'unique keys')
# delete the current one
del self._main[self._main._sequence[index]]
self._main.insert(index, key, value)
def __delitem__(self, i):
"""Delete the item at position i."""
key = self._main._sequence[i]
if isinstance(i, types.SliceType):
for k in key:
# FIXME: efficiency?
del self._main[k]
else:
del self._main[key]
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.items())
# FIXME: do we need to check if we are comparing with another ``Items``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.items() < other
def __le__(self, other): return self._main.items() <= other
def __eq__(self, other): return self._main.items() == other
def __ne__(self, other): return self._main.items() != other
def __gt__(self, other): return self._main.items() > other
def __ge__(self, other): return self._main.items() >= other
def __cmp__(self, other): return cmp(self._main.items(), other)
def __contains__(self, item): return item in self._main.items()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.iteritems()
def count(self, item): return self._main.items().count(item)
def index(self, item, *args): return self._main.items().index(item, *args)
def reverse(self): self._main.reverse()
def sort(self, *args, **kwds): self._main.sort(*args, **kwds)
def __mul__(self, n): return self._main.items()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.items() + other
def __radd__(self, other): return other + self._main.items()
def append(self, item):
"""Add an item to the end."""
# FIXME: this is only append if the key isn't already present
key, value = item
self._main[key] = value
def insert(self, i, item):
key, value = item
self._main.insert(i, key, value)
def pop(self, i=-1):
key = self._main._sequence[i]
return (key, self._main.pop(key))
def remove(self, item):
key, value = item
try:
assert value == self._main[key]
except (KeyError, AssertionError):
raise ValueError('ValueError: list.remove(x): x not in list')
else:
del self._main[key]
def extend(self, other):
# FIXME: is only a true extend if none of the keys already present
for item in other:
key, value = item
self._main[key] = value
def __iadd__(self, other):
self.extend(other)
## following methods not implemented for items ##
def __imul__(self, n): raise TypeError('Can\'t multiply items in place')
class Values(object):
"""
Custom object for accessing the values of an OrderedDict.
Can be called like the normal ``OrderedDict.values`` method, but also
supports indexing and sequence methods.
"""
def __init__(self, main):
self._main = main
def __call__(self):
"""Pretend to be the values method."""
return self._main._values()
def __getitem__(self, index):
"""Fetch the value at position i."""
if isinstance(index, types.SliceType):
return [self._main[key] for key in self._main._sequence[index]]
else:
return self._main[self._main._sequence[index]]
def __setitem__(self, index, value):
"""
Set the value at position i to value.
You can only do slice assignment to values if you supply a sequence of
equal length to the slice you are replacing.
"""
if isinstance(index, types.SliceType):
keys = self._main._sequence[index]
if len(keys) != len(value):
raise ValueError('attempt to assign sequence of size %s '
'to slice of size %s' % (len(value), len(keys)))
# FIXME: efficiency? Would be better to calculate the indexes
# directly from the slice object
# NOTE: the new keys can collide with existing keys (or even
# contain duplicates) - these will overwrite
for key, val in zip(keys, value):
self._main[key] = val
else:
self._main[self._main._sequence[index]] = value
### following methods pinched from UserList and adapted ###
def __repr__(self): return repr(self._main.values())
# FIXME: do we need to check if we are comparing with another ``Values``
# object? (like the __cast method of UserList)
def __lt__(self, other): return self._main.values() < other
def __le__(self, other): return self._main.values() <= other
def __eq__(self, other): return self._main.values() == other
def __ne__(self, other): return self._main.values() != other
def __gt__(self, other): return self._main.values() > other
def __ge__(self, other): return self._main.values() >= other
def __cmp__(self, other): return cmp(self._main.values(), other)
def __contains__(self, item): return item in self._main.values()
def __len__(self): return len(self._main._sequence) # easier :-)
def __iter__(self): return self._main.itervalues()
def count(self, item): return self._main.values().count(item)
def index(self, item, *args): return self._main.values().index(item, *args)
def reverse(self):
"""Reverse the values"""
vals = self._main.values()
vals.reverse()
# FIXME: efficiency
self[:] = vals
def sort(self, *args, **kwds):
"""Sort the values."""
vals = self._main.values()
vals.sort(*args, **kwds)
self[:] = vals
def __mul__(self, n): return self._main.values()*n
__rmul__ = __mul__
def __add__(self, other): return self._main.values() + other
def __radd__(self, other): return other + self._main.values()
## following methods not implemented for values ##
def __delitem__(self, i): raise TypeError('Can\'t delete items from values')
def __iadd__(self, other): raise TypeError('Can\'t add in place to values')
def __imul__(self, n): raise TypeError('Can\'t multiply values in place')
def append(self, item): raise TypeError('Can\'t append items to values')
def insert(self, i, item): raise TypeError('Can\'t insert items into values')
def pop(self, i=-1): raise TypeError('Can\'t pop items from values')
def remove(self, item): raise TypeError('Can\'t remove items from values')
def extend(self, other): raise TypeError('Can\'t extend values')
class SequenceOrderedDict(OrderedDict):
"""
Experimental version of OrderedDict that has a custom object for ``keys``,
``values``, and ``items``.
These are callable sequence objects that work as methods, or can be
manipulated directly as sequences.
Test for ``keys``, ``items`` and ``values``.
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys
[1, 2, 3]
>>> d.keys()
[1, 2, 3]
>>> d.setkeys((3, 2, 1))
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.setkeys((1, 2, 3))
>>> d.keys[0]
1
>>> d.keys[:]
[1, 2, 3]
>>> d.keys[-1]
3
>>> d.keys[-2]
2
>>> d.keys[0:2] = [2, 1]
>>> d
SequenceOrderedDict([(2, 3), (1, 2), (3, 4)])
>>> d.keys.reverse()
>>> d.keys
[3, 1, 2]
>>> d.keys = [1, 2, 3]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.keys = [3, 1, 2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2), (2, 3)])
>>> a = SequenceOrderedDict()
>>> b = SequenceOrderedDict()
>>> a.keys == b.keys
1
>>> a['a'] = 3
>>> a.keys == b.keys
0
>>> b['a'] = 3
>>> a.keys == b.keys
1
>>> b['b'] = 3
>>> a.keys == b.keys
0
>>> a.keys > b.keys
0
>>> a.keys < b.keys
1
>>> 'a' in a.keys
1
>>> len(b.keys)
2
>>> 'c' in d.keys
0
>>> 1 in d.keys
1
>>> [v for v in d.keys]
[3, 1, 2]
>>> d.keys.sort()
>>> d.keys
[1, 2, 3]
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)), strict=True)
>>> d.keys[::-1] = [1, 2, 3]
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.keys[:2]
[3, 2]
>>> d.keys[:2] = [1, 3]
Traceback (most recent call last):
KeyError: 'Keylist is not the same as current keylist.'
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.values
[2, 3, 4]
>>> d.values()
[2, 3, 4]
>>> d.setvalues((4, 3, 2))
>>> d
SequenceOrderedDict([(1, 4), (2, 3), (3, 2)])
>>> d.values[::-1]
[2, 3, 4]
>>> d.values[0]
4
>>> d.values[-2]
3
>>> del d.values[0]
Traceback (most recent call last):
TypeError: Can't delete items from values
>>> d.values[::2] = [2, 4]
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> 7 in d.values
0
>>> len(d.values)
3
>>> [val for val in d.values]
[2, 3, 4]
>>> d.values[-1] = 2
>>> d.values.count(2)
2
>>> d.values.index(2)
0
>>> d.values[-1] = 7
>>> d.values
[2, 3, 7]
>>> d.values.reverse()
>>> d.values
[7, 3, 2]
>>> d.values.sort()
>>> d.values
[2, 3, 7]
>>> d.values.append('anything')
Traceback (most recent call last):
TypeError: Can't append items to values
>>> d.values = (1, 2, 3)
>>> d
SequenceOrderedDict([(1, 1), (2, 2), (3, 3)])
>>> d = SequenceOrderedDict(((1, 2), (2, 3), (3, 4)))
>>> d
SequenceOrderedDict([(1, 2), (2, 3), (3, 4)])
>>> d.items()
[(1, 2), (2, 3), (3, 4)]
>>> d.setitems([(3, 4), (2 ,3), (1, 2)])
>>> d
SequenceOrderedDict([(3, 4), (2, 3), (1, 2)])
>>> d.items[0]
(3, 4)
>>> d.items[:-1]
[(3, 4), (2, 3)]
>>> d.items[1] = (6, 3)
>>> d.items
[(3, 4), (6, 3), (1, 2)]
>>> d.items[1:2] = [(9, 9)]
>>> d
SequenceOrderedDict([(3, 4), (9, 9), (1, 2)])
>>> del d.items[1:2]
>>> d
SequenceOrderedDict([(3, 4), (1, 2)])
>>> (3, 4) in d.items
1
>>> (4, 3) in d.items
0
>>> len(d.items)
2
>>> [v for v in d.items]
[(3, 4), (1, 2)]
>>> d.items.count((3, 4))
1
>>> d.items.index((1, 2))
1
>>> d.items.index((2, 1))
Traceback (most recent call last):
ValueError: list.index(x): x not in list
>>> d.items.reverse()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.reverse()
>>> d.items.sort()
>>> d.items
[(1, 2), (3, 4)]
>>> d.items.append((5, 6))
>>> d.items
[(1, 2), (3, 4), (5, 6)]
>>> d.items.insert(0, (0, 0))
>>> d.items
[(0, 0), (1, 2), (3, 4), (5, 6)]
>>> d.items.insert(-1, (7, 8))
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8), (5, 6)]
>>> d.items.pop()
(5, 6)
>>> d.items
[(0, 0), (1, 2), (3, 4), (7, 8)]
>>> d.items.remove((1, 2))
>>> d.items
[(0, 0), (3, 4), (7, 8)]
>>> d.items.extend([(1, 2), (5, 6)])
>>> d.items
[(0, 0), (3, 4), (7, 8), (1, 2), (5, 6)]
"""
def __init__(self, init_val=(), strict=True):
OrderedDict.__init__(self, init_val, strict=strict)
self._keys = self.keys
self._values = self.values
self._items = self.items
self.keys = Keys(self)
self.values = Values(self)
self.items = Items(self)
self._att_dict = {
'keys': self.setkeys,
'items': self.setitems,
'values': self.setvalues,
}
def __setattr__(self, name, value):
"""Protect keys, items, and values."""
if not '_att_dict' in self.__dict__:
object.__setattr__(self, name, value)
else:
try:
fun = self._att_dict[name]
except KeyError:
OrderedDict.__setattr__(self, name, value)
else:
fun(value)
# Tile wrapping copied from http://tkinter.unpythonic.net/wiki/TileWrapper.
# Will be able to replace with ttk from Python 2.7, eventually.
# only create these gui classes if Tkinter is available (so param does
# not depend on Tkinter).
try:
import Tkinter
from Tkconstants import * # CB: should get the specific imports and move to section below
Tkinter_imported = True
except ImportError:
Tkinter_imported = False
if Tkinter_imported:
if Tkinter.TkVersion >= 8.5:
class Style:
def default(self, style, **kw):
"""Sets the default value of the specified option(s) in style"""
pass
def map_style(self, **kw):
"""Sets dynamic values of the specified option(s) in style. See
"STATE MAPS", below."""
pass
def layout(self, style, layoutSpec):
"""Define the widget layout for style style. See "LAYOUTS" below
for the format of layoutSpec. If layoutSpec is omitted, return the
layout specification for style style. """
pass
def element_create(self, name, type, *args):
"""Creates a new element in the current theme of type type. The
only built-in element type is image (see image(n)), although
themes may define other element types (see
Ttk_RegisterElementFactory).
"""
pass
def element_names(self):
"""Returns a list of all elements defined in the current theme. """
pass
def theme_create(self, name, parent=None, basedon=None):
"""Creates a new theme. It is an error if themeName already exists.
If -parent is specified, the new theme will inherit styles, elements,
and layouts from the parent theme basedon. If -settings is present,
script is evaluated in the context of the new theme as per style theme
settings.
"""
pass
def theme_settings(self, name, script):
"""Temporarily sets the current theme to themeName, evaluate script,
then restore the previous theme. Typically script simply defines styles
and elements, though arbitrary Tcl code may appear.
"""
pass
def theme_names(self):
"""Returns a list of the available themes. """
return self.tk.call("style", "theme", "names")
def theme_use(self, theme):
"""Sets the current theme to themeName, and refreshes all widgets."""
return self.tk.call("style", "theme", "use", theme)
class Widget(Tkinter.Widget, Style):
def __init__(self, master, widgetName=None, cnf={}, kw={}, extra=()):
if not widgetName:
## why you would ever want to create a Tile Widget is beyond me!
widgetName="ttk::widget"
Tkinter.Widget.__init__(self, master, widgetName, cnf, kw)
def instate(self, spec=None, script=None):
"""Test the widget's state. If script is not specified, returns 1
if the widget state matches statespec and 0 otherwise. If script
is specified, equivalent to if {[pathName instate stateSpec]}
script.
"""
return self.tk.call(self._w, "instate", spec, script)
def state(self, spec=None):
"""Modify or inquire widget state. If stateSpec is present, sets
the widget state: for each flag in stateSpec, sets the corresponding
flag or clears it if prefixed by an exclamation point. Returns a new
state spec indicating which flags were changed: ''set changes
[pathName state spec] ; pathName state $changes'' will restore
pathName to the original state. If stateSpec is not specified,
returns a list of the currently-enabled state flags.
"""
return self.tk.call(self._w, "state", spec)
class Button(Widget, Tkinter.Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::button", cnf, kw)
###add frame support here--KWs
class Frame(Widget, Tkinter.Frame):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::frame", cnf, kw)
class Checkbutton(Widget, Tkinter.Checkbutton):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::checkbutton", cnf, kw)
class Combobox(Widget, Tkinter.Entry):
def __init__(self, master=None, cnf={}, **kw):
# HACK to work around strange parsing of list
if 'values' in kw:
values = kw['values']
if isinstance(values,list):
kw['values'] = tuple(values)
Widget.__init__(self, master, "ttk::combobox", cnf, kw)
def current(self, index=None):
"""If index is supplied, sets the combobox value to the element
at position newIndex in the list of -values. Otherwise, returns
the index of the current value in the list of -values or -1 if
the current value does not appear in the list.
"""
return self.tk.call(self._w, "current", index)
class Entry(Widget, Tkinter.Entry):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::entry", cnf, kw)
def validate(self):
"""Force revalidation, independent of the conditions specified by
the -validate option. Returns 0 if the -validatecommand returns a
false value, or 1 if it returns a true value or is not specified.
"""
return self.tk.call(self._w, "validate")
class Label(Widget, Tkinter.Label):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::label", cnf, kw)
###add LabelFrame class here--KW
class LabelFrame(Widget, Tkinter.Label):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::labelframe", cnf, kw)
class Menubutton(Widget, Tkinter.Menubutton):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::menubutton", cnf, kw)
class Notebook(Widget):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::notebook", cnf, kw)
def add(self, child, cnf=(), **kw):
"""Adds a new tab to the notebook. When the tab is selected, the
child window will be displayed. child must be a direct child of
the notebook window. See TAB OPTIONS for the list of available
options.
"""
return self.tk.call((self._w, "add", child) + self._options(cnf, kw))
def forget(self, index):
"""Removes the tab specified by index, unmaps and unmanages the
associated child window.
"""
return self.tk.call(self._w, "forget", index)
def index(self, index):
"""Returns the numeric index of the tab specified by index, or
the total number of tabs if index is the string "end".
"""
return self.tk.call(self._w, "index")
def select(self, index):
"""Selects the specified tab; the associated child pane will
be displayed, and the previously-selected pane (if different)
is unmapped.
"""
return self.tk.call(self._w, "select", index)
def tab(self, index, **kw):
"""Query or modify the options of the specific tab. If no
-option is specified, returns a dictionary of the tab option
values. If one -option is specified, returns the value of that
option. Otherwise, sets the -options to the corresponding
values. See TAB OPTIONS for the available options.
"""
return self.tk.call((self._w, "tab", index) + self._options(kw))
def tabs(self):
"""Returns a list of all pane windows managed by the widget."""
return self.tk.call(self._w, "tabs")
class Paned(Widget):
"""
WIDGET OPTIONS
Name Database name Database class
-orient orient Orient
Specifies the orientation of the window. If vertical, subpanes
are stacked top-to-bottom; if horizontal, subpanes are stacked
left-to-right.
PANE OPTIONS
The following options may be specified for each pane:
Name Database name Database class
-weight weight Weight
An integer specifying the relative stretchability of the pane.
When the paned window is resized, the extra space is added to or
subtracted from each pane proportionally to its -weight.
"""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::panedwindow", cnf, kw)
def add(self, subwindow, **kw):
"""Adds a new pane to the window. subwindow must be a direct child of
the paned window pathname. See PANE OPTIONS for the list of available
options.
"""
return self.tk.call((self._w, "add", subwindow) + self._options(kw))
def forget(self, pane):
"""Removes the specified subpane from the widget. pane is either an
integer index or the name of a managed subwindow.
"""
self.tk.call(self._w, "forget", pane)
def insert(self, pos, subwindow, **kw):
"""Inserts a pane at the specified position. pos is either the string
end, an integer index, or the name of a managed subwindow. If subwindow
is already managed by the paned window, moves it to the specified
position. See PANE OPTIONS for the list of available options.
"""
return self.tk.call((self._w, "insert", pos, subwindow) + self._options(kw))
def pane(self, pane, **kw):
"""Query or modify the options of the specified pane, where pane is
either an integer index or the name of a managed subwindow. If no
-option is specified, returns a dictionary of the pane option values.
If one -option is specified, returns the value of that option.
Otherwise, sets the -options to the corresponding values.
"""
return self.tk.call((self._w, "pane", pane) + self._options(kw))
class Progressbar(Widget):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::progressbar", cnf, kw)
def step(self, amount=1.0):
"""Increments the -value by amount. amount defaults to 1.0
if omitted. """
return self.tk.call(self._w, "step", amount)
def start(self):
self.tk.call("ttk::progressbar::start", self._w)
def stop(self):
self.tk.call("ttk::progressbar::stop", self._w)
class Radiobutton(Widget, Tkinter.Radiobutton):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::radiobutton", cnf, kw)
class Scrollbar(Widget, Tkinter.Scrollbar):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::scrollbar", cnf, kw)
class Separator(Widget):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, "ttk::separator", cnf, kw)
class Treeview(Widget, Tkinter.Listbox):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'ttk::treeview', cnf, kw)
def children(self, item, newchildren=None):
"""If newchildren is not specified, returns the list of
children belonging to item.
If newchildren is specified, replaces item's child list
with newchildren. Items in the old child list not present
in the new child list are detached from the tree. None of
the items in newchildren may be an ancestor of item.
"""
return self.tk.call(self._w, "children", item, newchildren)
def column(self, column, **kw):
"""Query or modify the options for the specified column.
If no options are specified, returns a dictionary of
option/value pairs. If a single option is specified,
returns the value of that option. Otherwise, the options
are updated with the specified values. The following
options may be set on each column:
-id name
The column name. This is a read-only option. For example,
[$pathname column #n -id] returns the data column
associated with data column #n.
-anchor
Specifies how the text in this column should be aligned
with respect to the cell. One of n, ne, e, se, s, sw, w,
nw, or center.
        -width w
            The width of the column in pixels; the default is 200.
        """
        # Completed using the same Tcl dispatch pattern as the other
        # methods in this module.
        return self.tk.call((self._w, "column", column) + self._options(kw))
def delete(self, items):
"""Deletes each of the items and all of their descendants.
The root item may not be deleted. See also: detach.
"""
return self.tk.call(self._w, "delete", items)
def detach(self, items):
"""Unlinks all of the specified items from the tree. The
items and all of their descendants are still present and
may be reinserted at another point in the tree but will
not be displayed. The root item may not be detached. See
also: delete.
"""
return self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns 1 if the specified item is present in the
tree, 0 otherwise.
"""
return self.tk.call(self._w, "exists", item)
def focus(self, item=None):
"""If item is specified, sets the focus item to item.
Otherwise, returns the current focus item, or {} if there
is none.
"""
return self.tk.call(self._w, "focus", item)
def heading(self, column, **kw):
"""Query or modify the heading options for the specified
column. Valid options are:
-text text
The text to display in the column heading.
-image imageName
Specifies an image to display to the right of the column heading.
-command script
A script to evaluate when the heading label is pressed.
"""
        return self.tk.call((self._w, "heading", column) + self._options(kw))
def identify(self, x, y):
"""Returns a description of the widget component under the point given
by x and y. The return value is a list with one of the following forms:
heading #n
The column heading for display column #n.
separator #n
The border to the right of display column #n.
cell itemid #n
The data value for item itemid in display column #n.
item itemid element
The tree label for item itemid; element is one of text, image, or
indicator, or another element name depending on the style.
row itemid
The y position is over the item but x does not identify any element
or displayed data value.
nothing
The coordinates are not over any identifiable object.
See COLUMN IDENTIFIERS for a discussion of display columns and data
columns.
"""
        return self.tk.call(self._w, "identify", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list of
children.
"""
        return self.tk.call(self._w, "index", item)
def insert(self, parent, index, id=None, **kw):
"""Creates a new item. parent is the item ID of the parent item, or
the empty string {} to create a new top-level item. index is an
integer, or the value end, specifying where in the list of parent's
children to insert the new item. If index is less than or equal to
zero, the new node is inserted at the beginning; if index is greater
than or equal to the current number of children, it is inserted at the
end. If -id is specified, it is used as the item identifier; id must
not already exist in the tree. Otherwise, a new unique identifier is
generated.
returns the item identifier of the newly created item. See ITEM
OPTIONS for the list of available options.
"""
        args = (self._w, "insert", parent, index)
        if id is not None:
            args = args + ("-id", id)
        return self.tk.call(args + self._options(kw))
    def item(self, item, **kw):
"""Query or modify the options for the specified item. If no -option
is specified, returns a dictionary of option/value pairs. If a single
-option is specified, returns the value of that option. Otherwise, the
item's options are updated with the specified values. See ITEM OPTIONS
for the list of available options.
"""
        return self.tk.call((self._w, "item", item) + self._options(kw))
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children. It is
illegal to move an item under one of its descendants.
If index is less than or equal to zero, item is moved to the
beginning; if greater than or equal to the number of children, it's
moved to the end.
"""
        return self.tk.call(self._w, "move", item, parent, index)
def next(self, item):
"""Returns the identifier of item's next sibling, or {} if item is the
last child of its parent.
"""
        return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or {} if item is at the top
level of the hierarchy.
"""
        return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or {} if item is
the first child of its parent.
"""
        return self.tk.call(self._w, "prev", item)
def selection(self):
"""Returns the list of selected items"""
        return self.tk.call(self._w, "selection")
def selection_set(self, items):
"""items becomes the new selection. """
        return self.tk.call(self._w, "selection", "set", items)
def selection_add(self, items):
"""Add items to the selection """
        return self.tk.call(self._w, "selection", "add", items)
def selection_remove(self, items):
"""Remove items from the selection """
        return self.tk.call(self._w, "selection", "remove", items)
def selection_toggle(self, items):
"""Toggle the selection state of each item in items. """
        return self.tk.call(self._w, "selection", "toggle", items)
def set(self, item, column, value=None):
"""If value is specified, sets the value of column column in item item,
otherwise returns the current value. See COLUMN IDENTIFIERS.
"""
        if value is None:
            return self.tk.call(self._w, "set", item, column)
        return self.tk.call(self._w, "set", item, column, value)
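# Illustrative usage sketch tying the Treeview methods together (the item
# and column names are assumptions, not from the original module):
#
#   tree = Treeview(root)
#   tree.insert('', 'end', id='item1', text='Item 1')  # new top-level item
#   tree.set('item1', 'size', '42')                    # write a cell value
#   tree.selection_set('item1')                        # select it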
else:
print "GUI: tcl/tk version is older than 8.5; using simple back-up widgets."
# In the future, could add more fake tile widgets (or handle more methods of
# existing ones) if required.
class FakeCombobox(Tkinter.OptionMenu):
def __init__(self, master=None, textvariable=None,values=None,state=None,**kw):
# missing state=readonly
# missing current()
Tkinter.OptionMenu.__init__(self,master,textvariable,*values)
Combobox = FakeCombobox
class FakeProgressbar(Tkinter.Frame):
def __init__(self,master=None,cnf={},**kw):
Tkinter.Frame.__init__(self,master)
def step(self,amount=1.0):
pass
def start(self):
pass
def stop(self):
pass
Progressbar = FakeProgressbar
# CB: tix has Notebook, Combobox, and Meter, but I wouldn't
# want to rely on Tix being present (even though it is
# supposed to be part of Python's standard library).
| bsd-3-clause | 419,085,562,894,774,600 | 34.977368 | 93 | 0.506839 | false |
andrecunha/idd3 | examine.py | 1 | 2041 | # -*- coding: utf-8 -*-
# IDD3 - Propositional Idea Density from Dependency Trees
# Copyright (C) 2014-2015 Andre Luiz Verucci da Cunha
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals, division
import pprint
import idd3
from idd3 import Relation, Engine
from idd3.rules import en
import nltk
from sys import argv
import logging
logging.basicConfig(level=logging.DEBUG)
try:
from termcolor import colored
except ImportError:
def colored(string, color, attrs):
return string
def demo():
idd3.use_language(en)
graphs = nltk.parse.dependencygraph.DependencyGraph.load(argv[1])
index = int(argv[2]) - 1
engine = Engine(idd3.all_rulesets, idd3.all_transformations)
relations = []
for relation in graphs[index].nodelist:
relations.append(Relation(**relation))
print(colored('Sentence %d:' % (index + 1), 'white', attrs=['bold']))
pprint.pprint(relations)
print(colored('Propositions:', 'white', attrs=['bold']))
engine.analyze(relations)
for i, prop in enumerate(engine.props):
print(str(i + 1) + ' ' + str(prop))
print(colored('Unprocessed relations:', 'white', attrs=['bold']))
for relation in engine.get_unprocessed_relations(relations):
print(relation)
if __name__ == '__main__':
if len(argv) != 3:
print('Usage: python', argv[0], '<conll file>', '<index>')
else:
demo()
| gpl-3.0 | 3,371,991,498,734,300,700 | 30.890625 | 78 | 0.696227 | false |
JMSwag/jms-utils | jms_utils/terminal.py | 1 | 8327 | # --------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2014-2016 Digital Sapphire
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# --------------------------------------------------------------------------
from __future__ import print_function
import logging
try:
import msvcrt
except ImportError:
msvcrt = None
import locale
import optparse
import os
import platform
import shlex
import struct
import subprocess
import sys
try:
import termios
except ImportError:
termios = None
try:
import tty
except ImportError:
tty = None
import six
log = logging.getLogger(__name__)
def print_to_console(text):
enc = locale.getdefaultlocale()[1] or "utf-8"
try:
print(text.encode(enc, errors="backslashreplace"))
except (LookupError, UnicodeEncodeError):
# Unknown encoding or encoding problem. Fallback to ascii
print(text.encode("ascii", errors="backslashreplace"))
def terminal_formatter():
max_width = 80
max_help_position = 80
# No need to wrap help messages if we're on a wide console
columns = get_terminal_size()[0]
if columns:
max_width = columns
fmt = optparse.IndentedHelpFormatter(width=max_width,
max_help_position=max_help_position)
return fmt
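# Illustrative usage sketch (the parser setup is an assumption): handing
# the formatter to optparse makes help text wrap to the detected width.
#
#   parser = optparse.OptionParser(formatter=terminal_formatter())
#   parser.add_option('-v', '--verbose', help='long help text now wraps '
#                     'to the real terminal width instead of 80 columns')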
# get width and height of console
# works on linux, os x, windows, cygwin(windows)
# originally retrieved from:
# http://stackoverflow.com/questions/
# 566746/how-to-get-console-window-width-in-python
def get_terminal_size():
current_os = platform.system()
tuple_xy = None
if current_os == u'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in [u'Linux', u'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
log.debug(u"default")
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# http://stackoverflow.com/questions/263890/
# how-do-i-find-the-width-height-of-a-terminal-window
try:
        # check_output (not check_call) is required here: check_call only
        # returns the exit status, never the command's stdout.
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
            # termios is imported (when available) at module level
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
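# Illustrative usage sketch: the (80, 25) fallback applies when no
# platform-specific probe succeeds.
#
#   cols, rows = get_terminal_size()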
# Gets a single character from standard input. Does not echo to the screen
class GetCh:
def __init__(self):
if sys.platform == u'win32':
self.impl = _GetchWindows()
else:
self.impl = _GetchUnix()
def __call__(self):
return self.impl()
class _GetchUnix:
def __init__(self):
pass
def __call__(self):
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
pass
def __call__(self):
return msvcrt.getch()
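# Illustrative usage sketch (variable names are assumptions): GetCh
# blocks for one keypress and returns it without echoing.
#
#   getch = GetCh()
#   key = getch()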
def ask_yes_no(question, default='no', answer=None):
u"""Will ask a question and keeps prompting until
answered.
Args:
question (str): Question to ask end user
default (str): Default answer if user just press enter at prompt
answer (str): Used for testing
Returns:
(bool) Meaning:
True - Answer is yes
False - Answer is no
"""
default = default.lower()
yes = [u'yes', u'ye', u'y']
no = [u'no', u'n']
if default in no:
help_ = u'[N/y]?'
default = False
else:
default = True
help_ = u'[Y/n]?'
while 1:
display = question + '\n' + help_
if answer is None:
log.debug(u'Under None')
answer = six.moves.input(display)
answer = answer.lower()
if answer == u'':
log.debug(u'Under blank')
return default
if answer in yes:
log.debug(u'Must be true')
return True
elif answer in no:
log.debug(u'Must be false')
return False
else:
sys.stdout.write(u'Please answer yes or no only!\n\n')
sys.stdout.flush()
answer = None
six.moves.input(u'Press enter to continue')
sys.stdout.write('\n\n\n\n\n')
sys.stdout.flush()
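# Illustrative usage sketch (the question text is an assumption):
#
#   if ask_yes_no(u'Overwrite the existing file?', default='no'):
#       pass  # user answered y/ye/yes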
def get_correct_answer(question, default=None, required=False,
answer=None, is_answer_correct=None):
u"""Ask user a question and confirm answer
Args:
question (str): Question to ask user
default (str): Default answer if no input from user
required (str): Require user to input answer
answer (str): Used for testing
is_answer_correct (str): Used for testing
"""
while 1:
if default is None:
msg = u' - No Default Available'
else:
msg = (u'\n[DEFAULT] -> {}\nPress Enter To '
u'Use Default'.format(default))
prompt = question + msg + u'\n--> '
if answer is None:
answer = six.moves.input(prompt)
        if answer == '' and required and default is None:
print(u'You have to enter a value\n\n')
six.moves.input(u'Press enter to continue')
print(u'\n\n')
answer = None
continue
if answer == u'' and default is not None:
answer = default
_ans = ask_yes_no(u'You entered {}, is this '
u'correct?'.format(answer),
answer=is_answer_correct)
if _ans:
return answer
else:
answer = None
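# Illustrative usage sketch (prompt and default are assumptions): pressing
# enter keeps the default; any other input is confirmed via ask_yes_no.
#
#   name = get_correct_answer(u'Project name?', default=u'demo')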
| mit | -2,106,631,287,010,268,000 | 28.217544 | 79 | 0.578119 | false |
f5devcentral/f5-cccl | f5_cccl/resource/net/fdb/record.py | 1 | 1541 | """Provides a class for managing BIG-IP FDB tunnel record resources."""
# coding=utf-8
#
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from f5_cccl.resource import Resource
from f5_cccl.utils.route_domain import normalize_address_with_route_domain
LOGGER = logging.getLogger(__name__)
class Record(Resource):
"""Record class for managing network configuration on BIG-IP."""
properties = dict(name=None, endpoint=None)
def __init__(self, name, default_route_domain, **data):
"""Create a record from CCCL recordType."""
super(Record, self).__init__(name, partition=None)
endpoint = data.get('endpoint', None)
self._data['endpoint'] = normalize_address_with_route_domain(
endpoint, default_route_domain)[0]
def __eq__(self, other):
if not isinstance(other, Record):
return False
return super(Record, self).__eq__(other)
def _uri_path(self, bigip):
raise NotImplementedError
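# Illustrative usage sketch (record name, endpoint, and route domain are
# assumptions): the endpoint is normalized to carry a %<route-domain>
# suffix, e.g. '10.0.0.1' with default_route_domain=0 would become
# '10.0.0.1%0'.
#
#   record = Record('00:50:56:aa:bb:cc', 0, endpoint='10.0.0.1')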
| apache-2.0 | 8,939,869,457,900,549,000 | 32.5 | 74 | 0.69695 | false |
emilkjer/django-model-utils | model_utils/managers.py | 1 | 8414 | from types import ClassType
import warnings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import OneToOneField
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
import django
class InheritanceQuerySet(QuerySet):
def select_subclasses(self, *subclasses):
if not subclasses:
subclasses = [rel.var_name for rel in self.model._meta.get_all_related_objects()
if isinstance(rel.field, OneToOneField)
and issubclass(rel.field.model, self.model)]
new_qs = self.select_related(*subclasses)
new_qs.subclasses = subclasses
return new_qs
def _clone(self, klass=None, setup=False, **kwargs):
for name in ['subclasses', '_annotated']:
if hasattr(self, name):
kwargs[name] = getattr(self, name)
return super(InheritanceQuerySet, self)._clone(klass, setup, **kwargs)
def annotate(self, *args, **kwargs):
qset = super(InheritanceQuerySet, self).annotate(*args, **kwargs)
qset._annotated = [a.default_alias for a in args] + kwargs.keys()
return qset
def get_subclass(self, obj):
"""
FIX see https://bitbucket.org/carljm/django-model-utils/pull-request/5/patch-to-issue-16/diff
and https://bitbucket.org/carljm/django-model-utils/issue/15/mti-problem-with-select_subclasses
"""
def get_attribute(obj, s):
try:
return getattr(obj,s, False)
except obj.__class__.DoesNotExist:
return False
if django.VERSION[0:2] < (1, 5):
sub_obj = [getattr(obj, s) for s in self.subclasses if getattr(obj, s)] or [obj]
else:
sub_obj = [getattr(obj, s) for s in self.subclasses if get_attribute(obj, s)] or [obj]
return sub_obj[0]
def iterator(self):
iter = super(InheritanceQuerySet, self).iterator()
if getattr(self, 'subclasses', False):
for obj in iter:
sub_obj = self.get_subclass(obj)
if getattr(self, '_annotated', False):
for k in self._annotated:
setattr(sub_obj, k, getattr(obj, k))
yield sub_obj
else:
for obj in iter:
yield obj
class InheritanceManager(models.Manager):
use_for_related_fields = True
def get_query_set(self):
return InheritanceQuerySet(self.model)
def select_subclasses(self, *subclasses):
return self.get_query_set().select_subclasses(*subclasses)
def get_subclass(self, *args, **kwargs):
return self.get_query_set().select_subclasses().get(*args, **kwargs)
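# Illustrative usage sketch (the models are assumptions, not from this
# module): with InheritanceManager attached to a concrete base model,
#
#   class Place(models.Model):
#       objects = InheritanceManager()
#   class Restaurant(Place):
#       pass
#
# Place.objects.select_subclasses() yields Restaurant instances instead
# of plain Place rows.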
class InheritanceCastMixin(object):
def cast(self):
results = tuple(self.values_list('pk', 'real_type'))
type_to_pks = {}
for pk, real_type_id in results:
type_to_pks.setdefault(real_type_id, []).append(pk)
content_types = ContentType.objects.in_bulk(type_to_pks.keys())
pk_to_child = {}
for real_type_id, pks in type_to_pks.iteritems():
content_type = content_types[real_type_id]
child_type = content_type.model_class()
children = child_type._default_manager.in_bulk(pks)
for pk, child in children.iteritems():
pk_to_child[pk] = child
children = []
        # sort children into the same order as the parents were returned
for pk, real_type_id in results:
children.append(pk_to_child[pk])
return children
class QueryManager(models.Manager):
def __init__(self, *args, **kwargs):
if args:
self._q = args[0]
else:
self._q = models.Q(**kwargs)
super(QueryManager, self).__init__()
def order_by(self, *args):
self._order_by = args
return self
def get_query_set(self):
qs = super(QueryManager, self).get_query_set().filter(self._q)
if hasattr(self, '_order_by'):
return qs.order_by(*self._order_by)
return qs
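# Illustrative usage sketch (model and field are assumptions): a
# QueryManager pre-filters its queryset with the given Q/keyword lookups.
#
#   class Post(models.Model):
#       published = models.BooleanField(default=False)
#       public = QueryManager(published=True).order_by('-id')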
class PassThroughManager(models.Manager):
"""
Inherit from this Manager to enable you to call any methods from your
custom QuerySet class from your manager. Simply define your QuerySet
class, and return an instance of it from your manager's `get_query_set`
method.
Alternately, if you don't need any extra methods on your manager that
aren't on your QuerySet, then just pass your QuerySet class to the
``for_queryset_class`` class method.
class PostQuerySet(QuerySet):
def enabled(self):
return self.filter(disabled=False)
class Post(models.Model):
objects = PassThroughManager.for_queryset_class(PostQuerySet)()
"""
# pickling causes recursion errors
_deny_methods = ['__getstate__', '__setstate__', '_db']
def __init__(self, queryset_cls=None):
self._queryset_cls = queryset_cls
super(PassThroughManager, self).__init__()
def __getattr__(self, name):
if name in self._deny_methods:
raise AttributeError(name)
return getattr(self.get_query_set(), name)
def get_query_set(self):
if self._queryset_cls is not None:
kargs = {'model': self.model}
if hasattr(self, '_db'):
kargs['using'] = self._db
return self._queryset_cls(**kargs)
return super(PassThroughManager, self).get_query_set()
@classmethod
def for_queryset_class(cls, queryset_cls):
class _PassThroughManager(cls):
def __init__(self):
return super(_PassThroughManager, self).__init__()
def get_query_set(self):
kwargs = {}
if hasattr(self, "_db"):
kwargs["using"] = self._db
return queryset_cls(self.model, **kwargs)
return _PassThroughManager
def manager_from(*mixins, **kwds):
"""
Returns a Manager instance with extra methods, also available and
chainable on generated querysets.
(By George Sakkis, originally posted at
http://djangosnippets.org/snippets/2117/)
:param mixins: Each ``mixin`` can be either a class or a function. The
generated manager and associated queryset subclasses extend the mixin
classes and include the mixin functions (as methods).
:keyword queryset_cls: The base queryset class to extend from
(``django.db.models.query.QuerySet`` by default).
:keyword manager_cls: The base manager class to extend from
(``django.db.models.manager.Manager`` by default).
"""
warnings.warn(
"manager_from is pending deprecation; use PassThroughManager instead.",
PendingDeprecationWarning,
stacklevel=2)
# collect separately the mixin classes and methods
bases = [kwds.get('queryset_cls', QuerySet)]
methods = {}
for mixin in mixins:
if isinstance(mixin, (ClassType, type)):
bases.append(mixin)
else:
            try:
                methods[mixin.__name__] = mixin
            except AttributeError:
                raise TypeError('Mixin must be class or function, not %s' %
                                mixin.__class__)
# create the QuerySet subclass
id = hash(mixins + tuple(kwds.iteritems()))
new_queryset_cls = type('Queryset_%d' % id, tuple(bases), methods)
# create the Manager subclass
bases[0] = manager_cls = kwds.get('manager_cls', Manager)
new_manager_cls = type('Manager_%d' % id, tuple(bases), methods)
# and finally override new manager's get_query_set
super_get_query_set = manager_cls.get_query_set
def get_query_set(self):
# first honor the super manager's get_query_set
qs = super_get_query_set(self)
# and then try to bless the returned queryset by reassigning it to the
# newly created Queryset class, though this may not be feasible
if not issubclass(new_queryset_cls, qs.__class__):
raise TypeError('QuerySet subclass conflict: cannot determine a '
'unique class for queryset instance')
qs.__class__ = new_queryset_cls
return qs
new_manager_cls.get_query_set = get_query_set
return new_manager_cls()
| bsd-3-clause | 8,724,807,216,674,744,000 | 36.395556 | 103 | 0.611719 | false |
easytaxibr/airflow | tests/models.py | 1 | 29842 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import unittest
import time
from airflow import models, settings, AirflowException
from airflow.exceptions import AirflowSkipException
from airflow.models import DAG, TaskInstance as TI
from airflow.models import State as ST
from airflow.models import DagModel
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils.state import State
from mock import patch
from nose_parameterized import parameterized
from tests.core import TEST_DAG_FOLDER
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
    def test_params_not_passed_is_empty_dict(self):
"""
Test that when 'params' is _not_ passed to a new Dag, that the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
dag, that the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
        the DAG (unless they specify a different DAG)
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
def test_dag_topological_sort(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
self.assertTrue(topological_list[0] in tasks)
tasks.remove(topological_list[0])
self.assertTrue(topological_list[1] in tasks)
tasks.remove(topological_list[1])
self.assertTrue(topological_list[2] in tasks)
tasks.remove(topological_list[2])
self.assertTrue(topological_list[3] == op1)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
self.assertTrue(topological_list[0] in set1)
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
self.assertTrue(topological_list[1] in set2)
set2.remove(topological_list[1])
self.assertTrue(topological_list[2] in set2)
set2.remove(topological_list[2])
self.assertTrue(topological_list[3] in set2)
self.assertTrue(topological_list[4] == op3)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertEquals(tuple(), dag.topological_sort())
class DagRunTest(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(dag_folder=TEST_DAG_FOLDER)
def create_dag_run(self, dag_id, state=State.RUNNING, task_states=None):
now = datetime.datetime.now()
dag = self.dagbag.get_dag(dag_id)
dag_run = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(state, session)
session.close()
return dag_run
def test_id_for_date(self):
run_id = models.DagRun.id_for_date(
datetime.datetime(2015, 1, 2, 3, 4, 5, 6, None))
self.assertEqual(
'scheduled__2015-01-02T03:04:05', run_id,
'Generated run_id did not match expectations: {0}'.format(run_id))
def test_dagrun_running_when_upstream_skipped(self):
"""
Tests that a DAG run is not failed when an upstream task is skipped
"""
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.NONE,
}
# dags/test_dagrun_short_circuit_false.py
dag_run = self.create_dag_run('test_dagrun_short_circuit_false',
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.RUNNING, updated_dag_state)
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
# dags/test_dagrun_short_circuit_false.py
dag_run = self.create_dag_run('test_dagrun_short_circuit_false',
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
class DagBagTest(unittest.TestCase):
def test_get_existing_dag(self):
"""
        test that we're able to parse some example DAGs and retrieve them
"""
dagbag = models.DagBag(include_examples=True)
some_expected_dag_ids = ["example_bash_operator",
"example_branch_operator"]
for dag_id in some_expected_dag_ids:
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertGreaterEqual(dagbag.size(), 7)
def test_get_non_existing_dag(self):
"""
        test that retrieving a non-existing dag id returns None without crashing
"""
dagbag = models.DagBag(include_examples=True)
non_existing_dag_id = "non_existing_dag_id"
self.assertIsNone(dagbag.get_dag(non_existing_dag_id))
def test_process_file_that_contains_multi_bytes_char(self):
"""
        test that we're able to parse a file that contains a multi-byte char
"""
from tempfile import NamedTemporaryFile
f = NamedTemporaryFile()
f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana)
f.flush()
dagbag = models.DagBag(include_examples=True)
self.assertEqual([], dagbag.process_file(f.name))
def test_zip(self):
"""
test the loading of a DAG within a zip file that includes dependencies
"""
dagbag = models.DagBag()
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
self.assertTrue(dagbag.get_dag("test_zip_dag"))
@patch.object(DagModel,'get_current')
def test_get_dag_without_refresh(self, mock_dagmodel):
"""
Test that, once a DAG is loaded, it doesn't get refreshed again if it
hasn't been expired.
"""
dag_id = 'example_bash_operator'
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = None
mock_dagmodel.return_value.fileloc = 'foo'
class TestDagBag(models.DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if 'example_bash_operator.py' == os.path.basename(filepath):
TestDagBag.process_file_calls += 1
super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(include_examples=True)
processed_files = dagbag.process_file_calls
        # Should not call process_file again, since it's already loaded during init.
self.assertEqual(1, dagbag.process_file_calls)
self.assertIsNotNone(dagbag.get_dag(dag_id))
self.assertEqual(1, dagbag.process_file_calls)
def test_get_dag_fileloc(self):
"""
Test that fileloc is correctly set when we load example DAGs,
specifically SubDAGs.
"""
dagbag = models.DagBag(include_examples=True)
expected = {
'example_bash_operator': 'example_bash_operator.py',
'example_subdag_operator': 'example_subdag_operator.py',
'example_subdag_operator.section-1': 'subdags/subdag.py'
}
for dag_id, path in expected.items():
dag = dagbag.get_dag(dag_id)
self.assertTrue(
dag.fileloc.endswith('airflow/example_dags/' + path))
class TaskInstanceTest(unittest.TestCase):
def test_set_dag(self):
"""
Test assigning Operators to Dags, including deferred assignment
"""
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op = DummyOperator(task_id='op_1', owner='test')
# no dag assigned
self.assertFalse(op.has_dag())
self.assertRaises(AirflowException, getattr, op, 'dag')
# no improper assignment
with self.assertRaises(TypeError):
op.dag = 1
op.dag = dag
# no reassignment
with self.assertRaises(AirflowException):
op.dag = dag2
# but assigning the same dag is ok
op.dag = dag
self.assertIs(op.dag, dag)
self.assertIn(op, dag.tasks)
def test_infer_dag(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag)
op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2)
# double check dags
self.assertEqual(
[i.has_dag() for i in [op1, op2, op3, op4]],
[False, False, True, True])
# can't combine operators with no dags
self.assertRaises(AirflowException, op1.set_downstream, op2)
# op2 should infer dag from op1
op1.dag = dag
op1.set_downstream(op2)
self.assertIs(op2.dag, dag)
# can't assign across multiple DAGs
self.assertRaises(AirflowException, op1.set_downstream, op4)
self.assertRaises(AirflowException, op1.set_downstream, [op3, op4])
def test_bitshift_compose_operators(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test')
op4 = DummyOperator(task_id='test_op_4', owner='test')
op5 = DummyOperator(task_id='test_op_5', owner='test')
# can't compose operators without dags
with self.assertRaises(AirflowException):
op1 >> op2
dag >> op1 >> op2 << op3
# make sure dag assignment carries through
# using __rrshift__
self.assertIs(op1.dag, dag)
self.assertIs(op2.dag, dag)
self.assertIs(op3.dag, dag)
# op2 should be downstream of both
self.assertIn(op2, op1.downstream_list)
self.assertIn(op2, op3.downstream_list)
# test dag assignment with __rlshift__
dag << op4
self.assertIs(op4.dag, dag)
# dag assignment with __rrshift__
dag >> op5
self.assertIs(op5.dag, dag)
@patch.object(DAG, 'concurrency_reached')
def test_requeue_over_concurrency(self, mock_concurrency_reached):
mock_concurrency_reached.return_value = True
dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE,
max_active_runs=1, concurrency=2)
task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag)
ti = TI(task=task, execution_date=datetime.datetime.now())
ti.run()
self.assertEqual(ti.state, models.State.NONE)
@patch.object(TI, 'pool_full')
def test_run_pooling_task(self, mock_pool_full):
"""
        test that running a task updates the task state without actually
        running it (no dependency check in ti_deps anymore, so it also
        ends up as SUCCESS)
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task')
task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag,
pool='test_run_pooling_task_pool', owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.run()
self.assertEqual(ti.state, models.State.SUCCESS)
@patch.object(TI, 'pool_full')
def test_run_pooling_task_with_mark_success(self, mock_pool_full):
"""
test that running task with mark_success param update task state as SUCCESS
without running task.
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success')
task = DummyOperator(
task_id='test_run_pooling_task_with_mark_success_op',
dag=dag,
pool='test_run_pooling_task_with_mark_success_pool',
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.run(mark_success=True)
self.assertEqual(ti.state, models.State.SUCCESS)
def test_run_pooling_task_with_skip(self):
"""
test that running task which returns AirflowSkipOperator will end
up in a SKIPPED state.
"""
def raise_skip_exception():
raise AirflowSkipException
dag = models.DAG(dag_id='test_run_pooling_task_with_skip')
task = PythonOperator(
task_id='test_run_pooling_task_with_skip',
dag=dag,
python_callable=raise_skip_exception,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.run()
self.assertTrue(ti.state == models.State.SKIPPED)
def test_retry_delay(self):
"""
Test that retry delays are respected
"""
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=3),
dag=dag,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=datetime.datetime.now())
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 1)
# second run -- still up for retry because retry_delay hasn't expired
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
# third run -- failed
time.sleep(3)
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
@patch.object(TI, 'pool_full')
def test_retry_handling(self, mock_pool_full):
"""
Test that task retries are handled properly
"""
# Mock the pool with a pool with slots open since the pool doesn't actually exist
mock_pool_full.return_value = False
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=0),
dag=dag,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=datetime.datetime.now())
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 1)
# second run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti.try_number, 2)
# Clear the TI state since you can't run a task with a FAILED state without
# clearing it first
ti.set_state(None, settings.Session())
# third run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 3)
# fourth run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti.try_number, 4)
def test_next_retry_datetime(self):
delay = datetime.timedelta(seconds=3)
delay_squared = datetime.timedelta(seconds=9)
max_delay = datetime.timedelta(seconds=10)
dag = models.DAG(dag_id='fail_dag')
task = BashOperator(
task_id='task_with_exp_backoff_and_max_delay',
bash_command='exit 1',
retries=3,
retry_delay=delay,
retry_exponential_backoff=True,
max_retry_delay=max_delay,
dag=dag,
owner='airflow',
start_date=datetime.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=datetime.datetime.now())
ti.end_date = datetime.datetime.now()
ti.try_number = 1
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+delay)
ti.try_number = 2
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+delay_squared)
ti.try_number = 3
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
def test_depends_on_past(self):
dagbag = models.DagBag(dag_folder=TEST_DAG_FOLDER)
dag = dagbag.get_dag('test_depends_on_past')
dag.clear()
task = dag.tasks[0]
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(task, run_date)
# depends_on_past prevents the run
task.run(start_date=run_date, end_date=run_date)
ti.refresh_from_db()
self.assertIs(ti.state, None)
# ignore first depends_on_past to allow the run
task.run(
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
# Parameterized tests to check for the correct firing
# of the trigger_rule under various circumstances
# Numeric fields are in order:
# successes, skipped, failed, upstream_failed, done
@parameterized.expand([
#
# Tests for all_success
#
['all_success', 5, 0, 0, 0, 5, True, None, True],
['all_success', 2, 0, 0, 0, 2, True, None, False],
['all_success', 2, 0, 1, 0, 3, True, ST.UPSTREAM_FAILED, False],
['all_success', 2, 1, 0, 0, 3, True, None, False],
['all_success', 0, 5, 0, 0, 5, True, ST.SKIPPED, True],
#
# Tests for one_success
#
['one_success', 5, 0, 0, 0, 5, True, None, True],
['one_success', 2, 0, 0, 0, 2, True, None, True],
['one_success', 2, 0, 1, 0, 3, True, None, True],
['one_success', 2, 1, 0, 0, 3, True, None, True],
['one_success', 0, 2, 0, 0, 2, True, None, True],
#
# Tests for all_failed
#
['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['all_failed', 0, 0, 5, 0, 5, True, None, True],
['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False],
['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False],
['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False],
#
# Tests for one_failed
#
['one_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['one_failed', 2, 0, 0, 0, 2, True, None, False],
['one_failed', 2, 0, 1, 0, 2, True, None, True],
['one_failed', 2, 1, 0, 0, 3, True, None, False],
['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False],
#
# Tests for done
#
['all_done', 5, 0, 0, 0, 5, True, None, True],
['all_done', 2, 0, 0, 0, 2, True, None, False],
['all_done', 2, 0, 1, 0, 3, True, None, False],
['all_done', 2, 1, 0, 0, 3, True, None, False]
])
def test_check_task_dependencies(self, trigger_rule, successes, skipped,
failed, upstream_failed, done,
flag_upstream_failed,
expect_state, expect_completed):
start_date = datetime.datetime(2016, 2, 1, 0, 0, 0)
dag = models.DAG('test-dag', start_date=start_date)
downstream = DummyOperator(task_id='downstream',
dag=dag, owner='airflow',
trigger_rule=trigger_rule)
for i in range(5):
task = DummyOperator(task_id='runme_{}'.format(i),
dag=dag, owner='airflow')
task.set_downstream(downstream)
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(downstream, run_date)
dep_results = TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=flag_upstream_failed)
completed = all([dep.passed for dep in dep_results])
self.assertEqual(completed, expect_completed)
self.assertEqual(ti.state, expect_state)
def test_xcom_pull_after_success(self):
"""
tests xcom set/clear relative to a task in a 'success' rerun scenario
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=datetime.datetime(2016, 6, 2, 0, 0, 0))
exec_date = datetime.datetime.now()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
# The second run and assert is to handle AIRFLOW-131 (don't clear on
# prior success)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
# execute, even if dependencies are ignored
ti.run(ignore_all_deps=True, mark_success=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Xcom IS finally cleared once task has executed
ti.run(ignore_all_deps=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
def test_xcom_pull_different_execution_date(self):
"""
tests xcom fetch behavior with different execution dates, using
both xcom_pull with "include_prior_dates" and without
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=datetime.datetime(2016, 6, 2, 0, 0, 0))
exec_date = datetime.datetime.now()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
exec_date += datetime.timedelta(days=1)
ti = TI(
task=task, execution_date=exec_date)
ti.run()
        # We have set a new execution date (and did not pass in
        # 'include_prior_dates'), which means this task should now have a
        # cleared xcom value
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
# We *should* get a value using 'include_prior_dates'
self.assertEqual(ti.xcom_pull(task_ids='test_xcom',
key=key,
include_prior_dates=True),
value)
def test_post_execute_hook(self):
"""
Test that post_execute hook is called with the Operator's result.
The result ('error') will cause an error to be raised and trapped.
"""
class TestError(Exception):
pass
class TestOperator(PythonOperator):
def post_execute(self, context, result):
if result == 'error':
raise TestError('expected error.')
dag = models.DAG(dag_id='test_post_execute_dag')
task = TestOperator(
task_id='test_operator',
dag=dag,
python_callable=lambda: 'error',
owner='airflow',
start_date=datetime.datetime(2017, 2, 1))
ti = TI(task=task, execution_date=datetime.datetime.now())
with self.assertRaises(TestError):
ti.run()
| apache-2.0 | -7,818,588,167,669,180,000 | 35.084643 | 90 | 0.580189 | false |
MERegistro/meregistro | meregistro/apps/backend/views/__init__.py | 1 | 2562 | # -*- coding: UTF-8 -*-
from meregistro.shortcuts import my_render
from apps.seguridad.decorators import credential_required
from apps.backend.forms import ConfiguracionSolapasEstablecimientoForm
from apps.backend.models import ConfiguracionSolapasEstablecimiento
from apps.backend.forms import ConfiguracionSolapasAnexoForm
from apps.backend.models import ConfiguracionSolapasAnexo
from apps.backend.forms import ConfiguracionSolapasExtensionAulicaForm
from apps.backend.models import ConfiguracionSolapasExtensionAulica
@credential_required('seg_backend')
def index(request):
return my_render(request, 'backend/index.html')
@credential_required('seg_backend')
def configurar_solapas_establecimiento(request):
solapas_config = ConfiguracionSolapasEstablecimiento.get_instance()
if request.method == 'POST':
form = ConfiguracionSolapasEstablecimientoForm(request.POST, instance=solapas_config)
if form.is_valid():
form.save()
request.set_flash('success', 'Datos guardados correctamente.')
else:
request.set_flash('warning', 'Ocurrió un error guardando los datos.')
else:
form = ConfiguracionSolapasEstablecimientoForm(instance=solapas_config)
return my_render(request, 'backend/solapas_establecimiento.html', {
'form': form,
})
@credential_required('seg_backend')
def configurar_solapas_anexo(request):
solapas_config = ConfiguracionSolapasAnexo.get_instance()
if request.method == 'POST':
form = ConfiguracionSolapasAnexoForm(request.POST, instance=solapas_config)
if form.is_valid():
form.save()
request.set_flash('success', 'Datos guardados correctamente.')
else:
request.set_flash('warning', 'Ocurrió un error guardando los datos.')
else:
form = ConfiguracionSolapasAnexoForm(instance=solapas_config)
return my_render(request, 'backend/solapas_anexo.html', {
'form': form,
})
@credential_required('seg_backend')
def configurar_solapas_extension_aulica(request):
solapas_config = ConfiguracionSolapasExtensionAulica.get_instance()
if request.method == 'POST':
form = ConfiguracionSolapasExtensionAulicaForm(request.POST, instance=solapas_config)
if form.is_valid():
form.save()
request.set_flash('success', 'Datos guardados correctamente.')
else:
request.set_flash('warning', 'Ocurrió un error guardando los datos.')
else:
form = ConfiguracionSolapasExtensionAulicaForm(instance=solapas_config)
return my_render(request, 'backend/solapas_extension_aulica.html', {
'form': form,
})
| bsd-3-clause | 5,193,089,650,953,476,000 | 36.632353 | 89 | 0.75381 | false |
syagev/kaggle_dsb | luna16/src/conv_net/data.py | 1 | 2668 | from __future__ import division
import numpy as np
import os
import pickle
import glob
import Image
from skimage.io import imread
from sklearn.cross_validation import train_test_split
dataset_dir = "../../data/samples"
def load():
tps = glob.glob(dataset_dir+"/*true.jpg")
fps_2 = glob.glob(dataset_dir+"/*false.jpg")
fps = np.random.choice(fps_2,10000)
images_tps = [[imread(x)] for x in tps]
images_fps = [[imread(x)] for x in fps]
labels = np.concatenate((np.ones((len(images_tps))),np.zeros((len(images_fps))))).astype("ubyte")
images = np.concatenate((images_tps,images_fps)).astype("float32")
train_X, test_X, train_y, test_y = train_test_split(images,labels, test_size=0.4, random_state=1337)
    half = len(test_X) // 2
val_X = test_X[:half]
val_y = test_y[:half]
test_X = test_X[half:]
test_y = test_y[half:]
label_to_names = {0:"false",1:"true"}
# training set, batches 1-4
# train_X = np.zeros((40000, 3, 32, 32), dtype="float32")
# train_y = np.zeros((40000, 1), dtype="ubyte").flatten()
# n_samples = 10000 # number of samples per batch
# for i in range(0,4):
# f = open(os.path.join(dataset_dir, "data_batch_"+str(i+1)+""), "rb")
# cifar_batch = pickle.load(f)
# f.close()
# train_X[i*n_samples:(i+1)*n_samples] = (cifar_batch['data'].reshape(-1, 3, 32, 32) / 255.).astype("float32")
# train_y[i*n_samples:(i+1)*n_samples] = np.array(cifar_batch['labels'], dtype='ubyte')
#
# # validation set, batch 5
# f = open(os.path.join(dataset_dir, "data_batch_5"), "rb")
# cifar_batch_5 = pickle.load(f)
# f.close()
# val_X = (cifar_batch_5['data'].reshape(-1, 3, 32, 32) / 255.).astype("float32")
# val_y = np.array(cifar_batch_5['labels'], dtype='ubyte')
#
# # labels
# f = open(os.path.join(dataset_dir, "batches.meta"), "rb")
# cifar_dict = pickle.load(f)
# label_to_names = {k:v for k, v in zip(range(10), cifar_dict['label_names'])}
# f.close()
#
# # test set
# f = open(os.path.join(dataset_dir, "test_batch"), "rb")
# cifar_test = pickle.load(f)
# f.close()
# test_X = (cifar_test['data'].reshape(-1, 3, 32, 32) / 255.).astype("float32")
# test_y = np.array(cifar_test['labels'], dtype='ubyte')
#
#
# print("training set size: data = {}, labels = {}".format(train_X.shape, train_y.shape))
# print("validation set size: data = {}, labels = {}".format(val_X.shape, val_y.shape))
# print("test set size: data = {}, labels = {}".format(test_X.shape, test_y.shape))
#
    return train_X, train_y, val_X, val_y, test_X, test_y, label_to_names
| apache-2.0 | -957,578,049,570,239,400 | 40.061538 | 118 | 0.594078 | false |
ace3df/ImageTweet | plugins/safebooru.py | 1 | 9578 | # -*- coding: utf-8 -*-
import random
import time
import sys
import os
import re
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import utils
def delete_image(image):
    # time is already imported at module level; no local import is needed
    time.sleep(10)
os.remove(image)
def tag_clean(tag_html):
text = tag_html.text
text = text.rstrip('1234567890.')
text = text.replace("'", "\'").strip()
return text
def get_image_online(**kwargs):
if kwargs.get('used images'):
txt_name = kwargs.get('used images')
used_links = open(txt_name, 'r').read().splitlines()
else:
txt_name = os.path.join(os.getcwd(), "Used safebooru {0}.txt".format(
kwargs['bot name']))
try:
used_links = open(txt_name, 'r').read().splitlines()
except:
if not os.path.exists(txt_name):
print("Didn't find any used links! Creating a TXT!")
print("Set it to:\n{0}".format(txt_name))
used_links = []
else:
used_links = open(txt_name, 'r').read().splitlines()
if kwargs.get('highest page'):
high_page = int(kwargs.get('highest page'))
else:
high_page = 50
tried_pages = [high_page]
cookie_file = None
try_count = 0
low_page = 0
page = 0
x = None
no_images = False
url_start = "http://safebooru.org"
url_search = "http://safebooru.org/index.php?page=post&s=list&tags="
if utils.is_bool(kwargs.get('login')):
cookie_file = "../safebooru.txt"
url_login = url_start + "/index.php?page=account&s=login&code=00"
form_num = 0
form_user = "user"
form_password = "pass"
username = kwargs.get('username')
password = kwargs.get('password')
if not os.path.exists(cookie_file):
browser, s = utils.scrape_site(url_login, cookie_file, True)
form = browser.get_form(form_num)
form[form_user].value = username
form[form_password].value = password
browser.submit_form(form)
s.cookies.save()
if utils.is_bool(kwargs.get('save images')):
if kwargs.get('path'):
path = kwargs.get('path')
else:
path = os.path.abspath(os.path.join(os.getcwd(),
"images"))
if not os.path.exists(path):
os.makedirs(path)
else:
path = os.path.abspath(os.path.join(os.getcwd()))
if kwargs.get('tags'):
if isinstance(kwargs.get('tags'), list):
tags = '+'.join(kwargs.get('tags'))
else:
tags = '+'.join(kwargs.get('tags').split(', '))
else:
tags = ""
if kwargs.get('ignore tags'):
if isinstance(kwargs.get('ignore tags'), list):
ignore_tags = kwargs.get('ignore tags')
else:
ignore_tags = kwargs.get('ignore tags').split(', ')
else:
ignore_tags = []
if utils.is_bool(kwargs.get('ignore cosplay')):
ignore_cosplay = utils.is_bool(kwargs.get('ignore cosplay'))
else:
ignore_cosplay = False
if utils.is_bool(kwargs.get('accept webm')):
accept_webm = utils.is_bool(kwargs.get('accept webm'))
else:
accept_webm = False
tried_pages = [high_page + 41]
while True:
while True:
while True:
while True:
no_images = False
try_count += 1
if try_count == 15:
return False, False
page = str(int(random.randint(low_page, high_page) * 40))
while int(page) in tried_pages:
if int(page) == 0:
break
if not x:
x = high_page
page = str(int(
random.randint(low_page, high_page) * 1))
if int(page) > int(x):
continue
tried_pages.append(int(page))
x = min(tried_pages)
page_url = "&pid=" + str(page)
url = "%s%s%s" % (url_search, tags, page_url)
browser = utils.scrape_site(url, cookie_file)
if browser.find('h1', text="Nothing found, try google? "):
no_images = True
elif len(browser.find_all('img')) < 3:
no_images = True
time.sleep(1)
if not no_images:
break
elif no_images and int(page) == 0:
return False, False
good_image_links = []
image_links = browser.find_all('span', class_="thumb")
for link in image_links:
try:
link['id']
except:
continue
link = str(link['id'])[1:]
good_image_links.append(link)
if good_image_links == []:
return False, False
random.shuffle(good_image_links)
url = "%s/index.php?page=post&s=view&id=%s" % (
url_start, random.choice(good_image_links))
try_count = 0
while url in used_links:
url = "%s/index.php?page=post&s=view&id=%s" % (
url_start, random.choice(good_image_links))
try_count = try_count + 1
if try_count == 20:
break
used_links.append(url)
post_url = url
browser.open(url)
image_tags = []
char_tags = []
art_tags = []
sers_tags = []
tags_tags = []
site_tag = browser.find('ul', id="tag-sidebar")
site_tag = site_tag.find_all('li')
for taga in site_tag:
tag = tag_clean(taga)
if taga['class'][0] == "tag-type-artist":
art_tags.append(tag.title())
elif taga['class'][0] == "tag-type-copyright":
sers_tags.append(tag.title())
elif taga['class'][0] == "tag-type-character":
char_tags.append(tag.title())
else:
tags_tags.append(tag.title())
image_tags.append(tag.lower())
if any([item in [x.lower() for x in ignore_tags]
for item in [x.lower() for x in image_tags]]):
continue
if ignore_cosplay:
if any(" (cosplay)" in s for s in image_tags):
continue
break
filename = ""
if not utils.is_bool(kwargs.get('message')):
message = ""
try:
url = browser.find(
'img', attrs={'id': 'image'})['src'].replace("\\\\", "\\")
            except (TypeError, KeyError):
                # Flash file - no <img id="image"> element to take a src from
                continue
sn_kwgs = {}
sn_url, sn_kwgs = utils.saucenao(url, kwargs['saucenao api'], True)
re_dict = {'{#artist}': (
'#' if art_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in art_tags]),
'{#character}': (
'#' if char_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in char_tags]),
'{#series}': (
'#' if sers_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in sers_tags]),
'{#tags}': (
'#' if tags_tags else '') + ' #'.join(
[x.replace(" ", "_") for x in tags_tags]),
'{artist}': ', '.join(art_tags),
'{character}': ', '.join(char_tags),
'{series}': ', '.join(sers_tags),
'{tags}': ', '.join(tags_tags),
'{url}': post_url,
'{sn title}': sn_kwgs.get('title'),
'{sn illust id}': sn_kwgs.get('illust id'),
'{sn illust url}': sn_url,
'{sn artist}': sn_kwgs.get('artist'),
'{sn artist id}': sn_kwgs.get('artist id'),
'{sn artist url}': sn_kwgs.get('artist url')}
if kwargs.get('filename'):
filename = utils.replace_all(kwargs.get('filename'), re_dict)
filename = utils.safe_msg(filename)
if kwargs.get('message'):
message = utils.replace_all(kwargs.get('message'), re_dict)
message = utils.safe_msg(message)
with open(txt_name, 'w') as f:
f.write("\n".join(used_links))
tweet_image = utils.download_image(url, path, filename, **kwargs)
if tweet_image:
break
if not utils.is_bool(kwargs.get('save images')):
from threading import Thread
Thread(name="Delete Image", target=delete_image, args=(
tweet_image, )).start()
return message, tweet_image
def main(**kwargs):
message, image = get_image_online(**kwargs)
    return message, image
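
# A minimal usage sketch (an assumption, not part of the original script).
# The kwargs keys below mirror the ones this module reads via kwargs.get();
# the values are illustrative only, and the saucenao key is a placeholder.
if __name__ == '__main__':
    example_kwargs = {
        'tags': ['solo', 'landscape'],   # joined with '+' for the search URL
        'ignore tags': ['comic'],        # posts with these tags are skipped
        'highest page': 30,              # upper bound for random page picks
        'save images': True,             # keep downloads under ./images
        'message': '{character} from {series} {url}',
        'saucenao api': 'YOUR-API-KEY',  # hypothetical placeholder
    }
    msg, img = main(**example_kwargs)
    print(msg)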
| mit | 392,481,504,197,402,100 | 37.934959 | 79 | 0.448423 | false |
davebridges/mousedb | mousedb/veterinary/views.py | 1 | 7047 | '''This module generates the views for the veterinary app.
There is one generic home view for the entire app as well as detail, create update and delete views for these models:
* :class:`~mousedb.veterinary.models.MedicalIssue`
* :class:`~mousedb.veterinary.models.MedicalCondition`
* :class:`~mousedb.veterinary.models.MedicalTreatment`
'''
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from braces.views import LoginRequiredMixin, PermissionRequiredMixin
from mousedb.veterinary.models import MedicalIssue,MedicalCondition,MedicalTreatment
class VeterinaryHome(LoginRequiredMixin, TemplateView):
'''This view is the main page for the veterinary app.
This view contains links to all medical issues, conditions and treatments.
If this becomes too unwieldy over time, it might be necessary to limit medical_issues to the most recent few.'''
template_name = "veterinary_home.html"
def get_context_data(self, **kwargs):
'''Adds to the context all issues, conditions and treatments.'''
context = super(VeterinaryHome, self).get_context_data(**kwargs)
context['medical_issues'] = MedicalIssue.objects.all()
context['medical_conditions'] = MedicalCondition.objects.all()
context['medical_treatments'] = MedicalTreatment.objects.all()
return context
class MedicalIssueDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalIssue`.
    It passes an object **medical_issue** when the url **/veterinary/medical-issue/<pk>** is requested.'''
model = MedicalIssue
context_object_name = 'medical_issue'
template_name = 'medical_issue_detail.html'
class MedicalIssueCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalIssue`.
It requires the permissions to create a new medical issue and is found at the url **/veterinary/medical-issue/new**.'''
permission_required = 'veterinary.create_medicalissue'
model = MedicalIssue
fields = '__all__'
template_name = 'medical_issue_form.html'
class MedicalIssueUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalIssue`.
    It requires the permissions to update a medical issue and is found at the url **/veterinary/medical-issue/<pk>/edit**.'''
permission_required = 'veterinary.update_medicalissue'
model = MedicalIssue
fields = '__all__'
context_object_name = 'medical_issue'
template_name = 'medical_issue_form.html'
class MedicalIssueDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalIssue`.
    It requires the permissions to delete a medical issue and is found at the url **/veterinary/medical-issue/<pk>/delete**.'''
permission_required = 'veterinary.delete_medicalissue'
model = MedicalIssue
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalConditionDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalCondition`.
It passes an object **medical_condition** when the url **/veterinary/medical-condition/<slug>** is requested.'''
model = MedicalCondition
context_object_name = 'medical_condition'
template_name = 'medical_condition_detail.html'
class MedicalConditionCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalCondition`.
    It requires the permissions to create a new medical condition and is found at the url **/veterinary/medical-condition/new**.'''
permission_required = 'veterinary.create_medicalcondition'
model = MedicalCondition
fields = '__all__'
template_name = 'medical_condition_form.html'
class MedicalConditionUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalCondition`.
    It requires the permissions to update a medical condition and is found at the url **/veterinary/medical-condition/<slug>/edit**.'''
permission_required = 'veterinary.update_medicalcondition'
model = MedicalCondition
fields = '__all__'
context_object_name = 'medical_condition'
template_name = 'medical_condition_form.html'
class MedicalConditionDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalCondition`.
    It requires the permissions to delete a medical condition and is found at the url **/veterinary/medical-condition/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicalcondition'
model = MedicalCondition
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
class MedicalTreatmentDetail(LoginRequiredMixin, DetailView):
'''This view is for details of a particular :class:`~mousedb.veterinary.MedicalTreatment`.
It passes an object **medical_treatment** when the url **/veterinary/medical-treatment/<slug>** is requested.'''
model = MedicalTreatment
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_detail.html'
class MedicalTreatmentCreate(PermissionRequiredMixin, CreateView):
'''This view is for creating a new :class:`~mousedb.veterinary.MedicalTreatment`.
    It requires the permissions to create a new medical treatment and is found at the url **/veterinary/medical-treatment/new**.'''
permission_required = 'veterinary.create_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentUpdate(PermissionRequiredMixin, UpdateView):
'''This view is for updating a :class:`~mousedb.veterinary.MedicalTreatment`.
    It requires the permissions to update a medical treatment and is found at the url **/veterinary/medical-treatment/<slug>/edit**.'''
permission_required = 'veterinary.update_medicaltreatment'
model = MedicalTreatment
fields = '__all__'
context_object_name = 'medical_treatment'
template_name = 'medical_treatment_form.html'
class MedicalTreatmentDelete(PermissionRequiredMixin, DeleteView):
'''This view is for deleting a :class:`~mousedb.veterinary.MedicalTreatment`.
    It requires the permissions to delete a medical treatment and is found at the url **/veterinary/medical-treatment/<slug>/delete**.'''
permission_required = 'veterinary.delete_medicaltreatment'
model = MedicalTreatment
template_name = 'confirm_delete.html'
success_url = reverse_lazy('veterinary-home')
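
# A routing sketch (an assumption -- URLs are defined elsewhere in mousedb).
# The patterns follow the docstrings above, and 'veterinary-home' matches the
# reverse_lazy() name used by the delete views.
#
# from django.conf.urls import url
# from mousedb.veterinary import views
#
# urlpatterns = [
#     url(r'^$', views.VeterinaryHome.as_view(), name='veterinary-home'),
#     url(r'^medical-issue/(?P<pk>\d+)/$',
#         views.MedicalIssueDetail.as_view(), name='medical-issue-detail'),
#     url(r'^medical-condition/(?P<slug>[-\w]+)/$',
#         views.MedicalConditionDetail.as_view(), name='medical-condition-detail'),
# ]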
| bsd-3-clause | 775,855,840,487,166,100 | 44.75974 | 133 | 0.724847 | false |
ANR-DIADEMS/timeside-diadems | timeside/plugins/diadems/irit_singings.py | 1 | 7234 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Maxime Le Coz <[email protected]>
# This file is part of TimeSide.
# TimeSide is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# TimeSide is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with TimeSide. If not, see <http://www.gnu.org/licenses/>.
# Author: Maxime Le Coz <[email protected]>
from timeside.core import implements, interfacedoc
from timeside.core.analyzer import Analyzer, IAnalyzer
from timeside.plugins.diadems.irit_monopoly import IRITMonopoly
from timeside.plugins.diadems.irit_harmo_tracking import IRITHarmoTracker
from timeside.core.preprocessors import frames_adapter
from numpy import median, mean, linspace, argmin, argmax, array
from numpy.fft import rfft
from collections import Counter
class IRITSinging(Analyzer):
implements(IAnalyzer)
def __init__(self):
super(IRITSinging, self).__init__()
self.parents['irit_monopoly'] = IRITMonopoly()
self.parents['irit_harmo_tracking'] = IRITHarmoTracker()
self.thPoly = 0.15
self.thMono = 0.1
@interfacedoc
def setup(self, channels=None, samplerate=None, blocksize=None,
totalframes=None):
super(IRITSinging, self).setup(
channels, samplerate, blocksize, totalframes)
@staticmethod
@interfacedoc
def id():
return "irit_singing"
@staticmethod
@interfacedoc
def name():
return "IRIT Singings detection"
@staticmethod
@interfacedoc
def unit():
return ""
def __str__(self):
return "Singings segments"
@frames_adapter
def process(self, frames, eod=False):
return frames, eod
def post_process(self):
"""
:return:
"""
trackings = self.parents['irit_harmo_tracking'].results['irit_harmo_tracking']['data_object']["value"]
tr = sorted(trackings[0].nodes, key=lambda x: x.time)
tr_frame_rate = 1.0 / float(tr[1].time - tr[0].time)
pitch = self.parents['irit_monopoly'].results['irit_monopoly.pitch']['data_object']["value"]
segments_monopoly = self.parents['irit_monopoly'].results['irit_monopoly.segments']['data_object']
segments_monopoly = [(start, start + dur, label == 1) for start, dur, label in
zip(segments_monopoly["time"], segments_monopoly["duration"], segments_monopoly["label"])]
segments_chant = []
f0_frame_rate = 1.0 / float(pitch[1][0] - pitch[0][0])
for start, stop, label in segments_monopoly:
cumulChant = 0
            # Careful with label changes here ...
if label:
segs = split_notes(extract_pitch(pitch, start, stop), f0_frame_rate)
for seg in segs:
if has_vibrato(seg[2], f0_frame_rate):
cumulChant += seg[1] - seg[0]
segments_chant += [(start, stop, cumulChant / (stop - start) >= self.thMono)]
else:
for start, stop, value in extended_vibrato(trackings, tr_frame_rate):
segments_chant += [(start, stop, value >= self.thPoly)]
label = {1: "Singing", 0: "Non Singing"}
segs = self.new_result(data_mode='label', time_mode='segment')
segs.id_metadata.id += '.' + 'segments'
segs.id_metadata.name += ' ' + 'Segments'
segs.data_object.label_metadata.label = label
segs.data_object.time = array([s[0] for s in segments_chant])
segs.data_object.duration = array([s[1] - s[0] for s in segments_chant])
segs.data_object.label = array([int(s[2]) for s in segments_chant])
self.add_result(segs)
def extended_vibrato(trackings, spectrogram_sampling_rate, number_of_extrema_for_rupture=3):
"""
    Vibrato detection in a polyphonic context.
"""
extremums = [s.start for s in trackings] + [s.stop for s in trackings]
last = max(extremums)
counter = Counter(extremums)
ruptures = [0] + sorted([time for time in counter if counter[time] >= number_of_extrema_for_rupture]) + [last]
scores = []
for i, rupture in enumerate(ruptures[:-1]):
sum_present = 0.0
sum_vibrato = 0.0
for s in trackings:
frequencies = s.get_portion(rupture, ruptures[i + 1])
if len(frequencies) > 0.05 * spectrogram_sampling_rate:
sum_present += len(frequencies)
if has_vibrato(frequencies, spectrogram_sampling_rate):
sum_vibrato += len(frequencies)
if sum_present > 0:
scores += [(rupture, ruptures[i + 1], sum_vibrato / sum_present)]
return scores
def extract_pitch(pitch, start, stop):
return [p for t, p in pitch if start <= t <= stop]
def smoothing(data, number_of_points=3, smoothing_function=mean):
"""
"""
w = number_of_points / 2
return [0.0] * w + [smoothing_function(data[i - w:i + w]) for i in range(w, len(data) - w)] + [0.0] * w
def split_notes(f0, f0_sample_rate, minimum_segment_length=0.0):
"""
    Split the pitch track into pseudo-notes based on the fundamental frequency.
    Returns the list of segments in seconds.
"""
f0 = smoothing(f0, number_of_points=5, smoothing_function=median)
half_tone_ratio = 2**(1.0 / 12.0)
minimum_segment_length = minimum_segment_length / f0_sample_rate
    ratios = [max([y1, y2]) / min([y1, y2]) if min([y1, y2]) > 0 else 0 for y1, y2 in zip(f0[:-1], f0[1:])]
boundaries = [0] + [i + 1 for i, ratio in enumerate(ratios) if ratio > half_tone_ratio]
return [(start * f0_sample_rate, stop * f0_sample_rate, f0[start:stop])
            for start, stop in zip(boundaries[:-1], boundaries[1:]) if stop - start > minimum_segment_length]
def has_vibrato(serie, sampling_rate, minimum_frequency=4, maximum_frequency=8, Nfft=100):
"""
    Vibrato detection on a series via the Fourier transform of its derivative.
"""
vibrato = False
frequency_scale = linspace(0, sampling_rate / 2, Nfft / 2)
index_min_vibrato = argmin(abs(frequency_scale - minimum_frequency))
index_max_vibrato = argmin(abs(frequency_scale - maximum_frequency))
    derivative = [v1 - v2 for v1, v2 in zip(serie[:-1], serie[1:])]
fft_derivative = abs(rfft(derivative, Nfft))[:Nfft / 2]
i_max = argmax(fft_derivative)
if index_max_vibrato >= i_max >= index_min_vibrato:
vibrato = True
return vibrato
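
# Quick sanity sketch for has_vibrato (illustrative only, not part of the
# plugin): a pure 6 Hz modulation sampled at 100 Hz falls inside the default
# 4-8 Hz vibrato band and should be detected.
#
#   from math import sin, pi
#   sr = 100.0
#   serie = [440 + 5 * sin(2 * pi * 6 * t / sr) for t in range(200)]
#   assert has_vibrato(serie, sr)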
# Generate Grapher for IRITSinging analyzer
from timeside.core.grapher import DisplayAnalyzer
DisplayIritSinging = DisplayAnalyzer.create(
analyzer=IRITSinging,
result_id='irit_singing.segments',
grapher_id='grapher_irit_singing_segments',
grapher_name='Singings detection',
background='waveform',
staging=True)
| gpl-2.0 | -1,683,434,666,844,066,000 | 35.14 | 119 | 0.639734 | false |
SnapSearch/SnapSearch-Client-Python | src/SnapSearch/detector.py | 1 | 11166 | # -*- coding: utf-8 -*-
"""
SnapSearch.detector
~~~~~~~~~~~~~~~~~~~
:copyright: 2014 by `SnapSearch <https://snapsearch.io/>`_
:license: MIT, see LICENSE for more details.
:author: `LIU Yu <[email protected]>`_
:date: 2014/03/08
"""
# future import should come first
from __future__ import with_statement
__all__ = ['Detector', ]
import json
import os
import re
import sys
import SnapSearch.api as api
import SnapSearch.error as error
from ._compat import u
class Detector(object):
"""
Detects if the incoming HTTP request a) came from a search engine robot
and b) is eligible for interception. The ``Detector`` inspects the
following aspects of the incoming HTTP request:
1. if the request uses HTTP or HTTPS protocol
2. if the request uses HTTP ``GET`` method
    3. if the request is *not* from any ignored user agents
(ignored robots take precedence over matched robots)
4. if the request is accessing any route *not* matching the whitelist
5. if the request is *not* accessing any route matching the blacklist
6. if the request is *not* accessing any resource with an invalid
file extension
7. if the request has ``_escaped_fragment_`` query parameter
8. if the request is from any matched user agents
"""
@property
def robots(self):
"""
``dict`` of ``list``'s of user agents from search engine robots:
.. code-block:: json
{
"ignore": [
# user agents to be ignored
]
"match": [
# user agents to be matched
]
}
Can be changed to customize ignored and matched search engine robots.
The ``ignore`` list takes precedence over the ``match`` list.
"""
return self.__robots
@property
def extensions(self):
"""
``dict`` of ``list``'s of valid file extensions:
.. code-block:: json
{
"generic": [
# valid generic extensions
],
"python": [
# valid python extensions
]
}
Can be changed to customize valid file extensions.
"""
return self.__extensions
# private properties
__slots__ = ['__check_file_extensions', '__extensions', '__ignored_routes',
'__matched_routes', '__robots', ]
def __init__(self,
ignored_routes=[],
matched_routes=[],
check_file_extensions=False,
robots_json=None,
extensions_json=None):
"""
Optional arguments:
:param ignored_routes: blacklisted route regular expressions.
:type ignored_routes: ``list`` or ``tuple``
:param matched_routes: whitelisted route regular expressions.
:type matched_routes: ``list`` or ``tuple``
:param check_file_extensions: to check if the URL is going to a static
file resource that should not be intercepted.
:type check_file_extensions: ``bool``
:param robots_json: absolute path to an external ``robots.json`` file.
:param extensions_json: absolute path to an external
``extensions.json`` file.
:raises AssertionError: if ``extensions.json`` is specified, yet
``check_file_extensions`` is ``False``.
"""
self.__ignored_routes = set(ignored_routes)
self.__matched_routes = set(matched_routes)
# ``extensions.json`` is specified, yet do not require checking file
# extensions. this probably means a mistake.
assert(not (not check_file_extensions and extensions_json)), \
"specified ``extensions_json`` " \
"yet ``check_file_extensions`` is false"
self.__check_file_extensions = check_file_extensions
# json.load() may raise IOError, TypeError, or ValueError
with open(robots_json or api.DEFAULT_ROBOTS_JSON) as f:
self.__robots = json.load(f)
f.close()
# same as above
with open(extensions_json or api.DEFAULT_EXTENSIONS_JSON) as f:
self.__extensions = json.load(f)
f.close()
pass # void return
def __call__(self, request):
"""
:param request: incoming HTTP request.
:type request: ``dict``
:returns: :RFC:`3986` percent-encoded full URL if the incoming HTTP
request is eligible for interception, or ``None`` otherwise.
:raises error.SnapSearchError: if the structure of either
``robots.json`` or ``extensions.json`` is invalid.
"""
# wrap the incoming HTTP request (CGI-style environ)
environ = api.AnyEnv(request)
# do not intercept protocols other than HTTP and HTTPS
if environ.scheme not in ("http", "https", ):
return None
# do not intercept HTTP methods other than GET
if environ.method not in ("GET", ):
return None
# user agent may not exist in the HTTP request
user_agent = environ.user_agent
# request uri with query string
real_path = environ.path_qs
# validate ``robots`` since it can be altered from outside
if not self._validate_robots():
raise error.SnapSearchError(
"structure of ``robots`` is invalid")
# do not intercept requests from ignored robots
ignore_regex = u("|").join(
[re.escape(tok) for tok in self.robots.get('ignore', [])])
if re.search(ignore_regex, user_agent, re.I | re.U):
return None
# do not intercept if there exist whitelisted route(s) (matched_routes)
# and that the requested route **does not** match any one of them.
if self.__matched_routes:
found = False
for route in self.__matched_routes:
route_regex = u(route)
if re.search(route_regex, real_path, re.I | re.U):
found = True
break
if not found:
return None
# do not intercept if there exist blacklisted route(s) (ignored_routes)
# and that the requested route **does** matches one of them.
if self.__ignored_routes:
for route in self.__ignored_routes:
route_regex = u(route)
if re.search(route_regex, real_path, re.I | re.U):
return None
# detect extensions in order to prevent direct requests to static files
if self.__check_file_extensions:
# validate ``extensions`` since it can be altered from outside
if not self._validate_extensions():
raise error.SnapSearchError(
"structure of ``extensions`` is invalid")
# create a set of file extensions common for HTML resources
valid_extensions = set(
[s.lower() for s in self.extensions.get('generic', [])])
valid_extensions.update(
[s.lower() for s in self.extensions.get('python', [])])
# file extension regex. it looks for "/{file}.{ext}" in an URL that
# is not preceded by '?' (query parameters) or '#' (hash fragment).
# it will acquire the last extension that is present in the URL so
# with "/{file1}.{ext1}/{file2}.{ext2}" the ext2 will be the
# matched extension. furthermore if a file has multiple extensions
# "/{file}.{ext1}.{ext2}", it will only match extension2 because
# unix systems don't consider extensions to be metadata, and
# windows only considers the last extension to be valid metadata.
# Basically the {file}.{ext1} could actually just be the filename.
extension_regex = u(r"""
^ # start of the string
(?: # begin non-capturing group
(?! # begin negative lookahead
[?#] # question mark '?' or hash '#'
.* # zero or more wildcard characters
/ # literal slash '/'
[^/?#]+ # {file} - has one or more of any character
# except '/', '?' or '#'
\. # literal dot '.'
[^/?#]+ # {extension} - has one or more of any character
# except '/', '?' or '#'
) # end negative lookahead (prevents any '?' or
# '#' that precedes {file}.{extension} by
# any characters)
. # one wildcard character
)* # end non-capturing group (captures any number
# of wildcard characters that passes the
# negative lookahead)
/ # literal slash '/'
[^/?#]+ # {file} - has one or more of any character
# except forward slash, question mark or hash
\. # literal dot '.'
([^/?#]+) # {extension} - subgroup has one or more of any
# character except '/', '?' or '#'
""")
# match extension regex against decoded path
matches = re.match(extension_regex, real_path, re.U | re.X)
if matches:
url_extension = matches.group(1).lower()
if url_extension not in valid_extensions:
return None
# detect escaped fragment (since the ignored user agents has already
# been detected, SnapSearch won't continue the interception loop)
if "_escaped_fragment_" in environ.GET:
return environ.url
# intercept requests from matched robots
matched_regex = u("|").join(
[re.escape(tok) for tok in self.robots.get('match', [])])
if re.search(matched_regex, user_agent, re.I | re.U):
return environ.url
# do not intercept if no match at all
return None
def _validate_robots(self):
# ``robots`` should be a ``dict`` object, if keys ``ignore`` and
# ``match`` exist, the respective values must be ``list`` objects.
return isinstance(self.robots, dict) and \
isinstance(self.robots.get('ignore', []), list) and \
isinstance(self.robots.get('match', []), list)
def _validate_extensions(self):
# ``extensions`` should be a ``dict`` object, if keys ``generic`` and
# ``python`` exist, the respective values must be ``list`` objects.
return isinstance(self.extensions, dict) and \
isinstance(self.extensions.get('generic', []), list) and \
isinstance(self.extensions.get('python', []), list)
pass
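
# A minimal usage sketch (an assumption, not part of this module). ``request``
# is a CGI/WSGI-style environ dict; the exact keys consumed are defined by
# ``api.AnyEnv``, so the ones below are illustrative.
#
#   detector = Detector(ignored_routes=[r'^/static/'])
#   environ = {
#       'REQUEST_METHOD': 'GET',
#       'HTTP_USER_AGENT': 'Googlebot/2.1 (+http://www.google.com/bot.html)',
#       'PATH_INFO': '/profile/snapsearch',
#       'QUERY_STRING': '',
#       'HTTP_HOST': 'example.com',
#       'wsgi.url_scheme': 'http',
#   }
#   url = detector(environ)  # full URL if interceptable, otherwise None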
| mit | 7,516,432,823,431,704,000 | 38.178947 | 79 | 0.543346 | false |
phoebe-project/phoebe2-docs | 2.2/tutorials/irrad_method_horvat.py | 1 | 3005 | #!/usr/bin/env python
# coding: utf-8
# Lambert Scattering (irrad_method='horvat')
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.2,<2.3"')
# As always, let's do imports and initialize a logger and a new bundle. See [Building a System](../tutorials/building_a_system.ipynb) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
# Relevant Parameters
# ---------------------------------
# For parameters that affect reflection and heating (irrad_frac_\*) see the tutorial on [reflection and heating](./reflection_heating.ipynb).
#
# The 'irrad_method' compute option dictates whether irradiation is handled according to the new Horvat scheme which includes Lambert Scattering, Wilson's original reflection scheme, or ignored entirely.
# In[3]:
print(b['irrad_method'])
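
# The option can also be set on the bundle ahead of time instead of being passed to `run_compute` (a sketch -- the cells below pass `irrad_method` directly, which takes precedence for that call):

# In[ ]:

b['irrad_method'] = 'horvat'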
# Influence on Light Curves (fluxes)
# ---------------------------------
#
# Let's (roughly) reproduce Figure 8 from [Prsa et al. 2016](http://phoebe-project.org/publications/2016Prsa+) which shows the difference between the Wilson and Horvat schemes for various inclinations.
#
# <img src="prsa+2016_fig8.png" alt="Figure 8" width="600px"/>
#
# First we'll roughly create an A0-K0 binary and set reasonable albedos.
# In[4]:
b['teff@primary'] = 11000
b['requiv@primary'] = 2.5
b['gravb_bol@primary'] = 1.0
b['teff@secondary'] = 5000
b['requiv@secondary'] = 0.85
b['q@binary'] = 0.8/3.0
b.flip_constraint('mass@primary', solve_for='sma@binary')
b['mass@primary'] = 3.0
# In[5]:
print(b.filter(qualifier=['mass', 'requiv', 'teff'], context='component'))
# In[6]:
b['irrad_frac_refl_bol@primary'] = 1.0
b['irrad_frac_refl_bol@secondary'] = 0.6
# We'll also disable any eclipsing effects.
# In[7]:
b['eclipse_method'] = 'only_horizon'
# Now we'll compute the light curves with Wilson and Horvat irradiation, and plot the relative differences between the two as a function of phase, for several different values of the inclination.
# In[8]:
phases = phoebe.linspace(0,1,101)
b.add_dataset('lc', times=b.to_time(phases))
# In[9]:
for incl in [0,30,60,90]:
b.set_value('incl@binary', incl)
b.run_compute(irrad_method='wilson')
fluxes_wilson = b.get_value('fluxes', context='model')
b.run_compute(irrad_method='horvat')
fluxes_horvat = b.get_value('fluxes', context='model')
plt.plot(phases, (fluxes_wilson-fluxes_horvat)/fluxes_wilson, label='i={}'.format(incl))
plt.xlabel('phase')
plt.ylabel('[F(wilson) - F(horvat)] / F(wilson)')
plt.legend(loc='upper center')
plt.show()
# In[ ]:
| gpl-3.0 | -420,098,855,563,994,800 | 22.476563 | 203 | 0.66589 | false |
rcatwood/Savu | savu/test/jenkins/process_list_tests/i18_xrd_pipeline_test.py | 1 | 1440 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: tomo_recon
:platform: Unix
:synopsis: runner for tests using the MPI framework
.. moduleauthor:: Mark Basham <[email protected]>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class I18XrdPipelineTest(unittest.TestCase):
#@unittest.skip("Calibration file used in process list is not available as test data")
def test_process(self):
data_file = tu.get_test_data_path('mm.nxs')
process_file = tu.get_test_process_path(
'I18_pipeline_just_xrd_from_raw_filtered_adp_mod2.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 812,467,723,166,280,300 | 35 | 90 | 0.701389 | false |
afodor/pythonExamples | src/viterbi/viterbiExample.py | 1 | 2894 | import random
class MarkovState:
    """A hidden Markov model state: emission symbols with their
    probabilities, plus transition probabilities to each state."""
def __init__(self,charsToEmit, emissionProbs,transitionProbs):
self.charsToEmit = charsToEmit
self.emissionProbs = emissionProbs
self.transitionProbs = transitionProbs
def getEmissionIndex(self):
aRand = random.random()
cumulative = 0
index =0
for val in self.emissionProbs:
cumulative += val
if aRand <= cumulative:
return index
index = index + 1
return len(self.emissionProbs) - 1
def getIndexOfEmission(self, char):
for i in range(0, len(self.charsToEmit) ):
if str(self.charsToEmit[i]) == str(char):
return i
raise Exception("Cound not find " + str(char) )
def getTransitionIndex(self):
aRand = random.random()
cumulative = 0
index =0
for val in self.transitionProbs:
cumulative += val
if aRand <= cumulative:
return index
index = index + 1
return len(self.transitionProbs) - 1
def getMaxIndex( iterable ):
    """Return the index of the largest value in iterable (argmax)."""
    val = iterable[0]
    index = 0
    returnVal = 0
    for i in iterable:
        if i > val:
            val = i  # track the running maximum, not just the first element
            returnVal = index
        index = index + 1
    return returnVal
def getViterbiPath( markovStates, output):
    """Run the Viterbi recursion over an emitted sequence; returns one
    (state probabilities, index of the most likely state at that step)
    tuple per emission, assuming the chain starts in the first state."""
    returnPath = []
    oldViterbiProbs = []
    oldViterbiProbs.append(1) # we are 100% sure we start in the first state
for i in range( 1, len(markovStates) ):
oldViterbiProbs.append( 0)
aTuple = ( oldViterbiProbs, 0)
returnPath.append( aTuple )
for i in range( 0,len(output)):
newViterbiProbs = []
for j in range( 0, len(markovStates)):
state = markovStates[j]
emissionProb = state.emissionProbs[state.getIndexOfEmission(output[i])]
vTimesA=[]
for k in range(0, len(markovStates)):
vTimesA.append (oldViterbiProbs[k] * markovStates[k].transitionProbs[j])
#print( "vTimesA" + str( vTimesA))
maxVal = vTimesA[ getMaxIndex(vTimesA) ]
newViterbiProbs.append( emissionProb * maxVal)
aTuple = (newViterbiProbs,getMaxIndex(newViterbiProbs))
returnPath.append( aTuple)
oldViterbiProbs = newViterbiProbs
return returnPath
dice = ( 1,2,3,4,5,6 )
fairState = MarkovState( dice, (1/6,1/6,1/6,1/6,1/6,1/6), ( 0.95, 0.05) )
loadedState = MarkovState( dice, (1/10,1/10,1/10,1/10,1/10,5/10), ( 0.10, 0.90) )
states = ( fairState, loadedState )
################################################
rolls = "266666"
getViterbiPath( states, rolls)
################################################
rolls = ""
trueStates = ""
state = states[0]
for i in range( 1, 100):
nextState = state.getTransitionIndex()
state = states[ nextState]
trueStates = trueStates + str(nextState)
rolls = rolls + str( dice[ state.getEmissionIndex()] )
rolls
trueStates
viterbiPath = getViterbiPath( states, rolls)
for i in range(0, len(rolls)):
print( str(rolls[i]) + " " + str(trueStates[i])+ " " + str(viterbiPath[i][1]))
################################################
| gpl-2.0 | 38,707,374,708,130,250 | 25.561905 | 81 | 0.630615 | false |
salazardetroya/libmesh | doc/statistics/libmesh_citations.py | 1 | 2340 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Number of "papers using libmesh" by year.
#
# Note 1: this does not count citations "only," the authors must have actually
# used libmesh in part of their work. Therefore, these counts do not include
# things like Wolfgang citing us in his papers to show how Deal.II is
# superior...
#
# Note 2: I typically update this data after regenerating the web page,
# since bibtex2html renumbers the references starting from "1" each year.
#
# Note 3: These citations include anything that is not a dissertation/thesis.
# So, some are conference papers, some are journal articles, etc.
#
# Note 4: The libmesh paper came out in 2006, but there are some citations
# prior to that date, obviously. These counts include citations of the
# website libmesh.sf.net as well...
#
# Note 5: Preprints are listed as the "current year + 1" and are constantly
# being moved to their respective years after being published.
data = [
'2004', 5,
'\'05', 2,
'\'06', 13,
'\'07', 8,
'\'08', 23,
'\'09', 30,
'\'10', 24,
'\'11', 37,
'\'12', 50,
'\'13', 78,
'\'14', 60,
'\'15', 11,
'P', 8, # Preprints
'T', 36 # Theses
]
# Extract the x-axis labels from the data array
xlabels = data[0::2]
# Extract the publication counts from the data array
n_papers = data[1::2]
# The number of data points
N = len(xlabels)
# Get a reference to the figure
fig = plt.figure()
# 111 is equivalent to Matlab's subplot(1,1,1) command
ax = fig.add_subplot(111)
# Create an x-axis for plotting
x = np.linspace(1, N, N)
# Width of the bars
width = 0.8
# Make the bar chart. Plot years in blue, preprints and theses in green.
ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b')
ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g')
# Label the x-axis
plt.xlabel('P=Preprints, T=Theses')
# Set up the xtick locations and labels. Note that you have to offset
# the position of the ticks by width/2, where width is the width of
# the bars.
ax.set_xticks(np.linspace(1,N,N) + width/2)
ax.set_xticklabels(xlabels)
# Create a title string
title_string = 'LibMesh Citations, (' + str(sum(n_papers)) + ' Total)'
fig.suptitle(title_string)
# Save as PDF
plt.savefig('libmesh_citations.pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 | 302,535,557,434,097,100 | 26.529412 | 78 | 0.674359 | false |
2947721120/thumbor | thumbor/loaders/http_loader.py | 1 | 4393 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import re
from functools import partial
from urllib import unquote, quote
from urlparse import urlparse
import tornado.httpclient
from . import LoaderResult
from thumbor.utils import logger
from tornado.concurrent import return_future
QUOTE_PATH_REGEX = re.compile(r"((?:https?:\/\/)?[^/]+)(\/.*)")
def quote_url(url):
try:
url = url.encode('utf-8')
except UnicodeDecodeError:
pass
url = QUOTE_PATH_REGEX.split(url)
if len(url) > 2:
url[2] = quote(url[2])
url = ''.join(url)
return url
def _normalize_url(url):
url = quote_url(unquote(url))
return url if url.startswith('http') else 'http://%s' % url
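
# Behaviour sketch (illustrative, derived from the two helpers above): the
# path part is unquoted then re-quoted, and a missing scheme defaults to http.
#
#   quote_url('http://example.com/a b')  -> 'http://example.com/a%20b'
#   _normalize_url('example.com/a%20b')  -> 'http://example.com/a%20b'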
def validate(context, url, normalize_url_func=_normalize_url):
url = normalize_url_func(url)
res = urlparse(url)
if not res.hostname:
return False
if not context.config.ALLOWED_SOURCES:
return True
for pattern in context.config.ALLOWED_SOURCES:
if re.match('^%s$' % pattern, res.hostname):
return True
return False
def return_contents(response, url, callback, context):
result = LoaderResult()
context.metrics.incr('original_image.status.' + str(response.code))
if response.error:
result.successful = False
if response.code == 599:
# Return a Gateway Timeout status downstream if upstream times out
result.error = LoaderResult.ERROR_TIMEOUT
else:
result.error = LoaderResult.ERROR_NOT_FOUND
logger.warn("ERROR retrieving image {0}: {1}".format(url, str(response.error)))
elif response.body is None or len(response.body) == 0:
result.successful = False
result.error = LoaderResult.ERROR_UPSTREAM
logger.warn("ERROR retrieving image {0}: Empty response.".format(url))
else:
if response.time_info:
for x in response.time_info:
context.metrics.timing('original_image.time_info.' + x, response.time_info[x] * 1000)
context.metrics.timing('original_image.time_info.bytes_per_second', len(response.body) / response.time_info['total'])
result.buffer = response.body
callback(result)
@return_future
def load(context, url, callback, normalize_url_func=_normalize_url):
load_sync(context, url, callback, normalize_url_func)
def load_sync(context, url, callback, normalize_url_func):
using_proxy = context.config.HTTP_LOADER_PROXY_HOST and context.config.HTTP_LOADER_PROXY_PORT
if using_proxy or context.config.HTTP_LOADER_CURL_ASYNC_HTTP_CLIENT:
tornado.httpclient.AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
client = tornado.httpclient.AsyncHTTPClient(max_clients=context.config.HTTP_LOADER_MAX_CLIENTS)
user_agent = None
if context.config.HTTP_LOADER_FORWARD_USER_AGENT:
if 'User-Agent' in context.request_handler.request.headers:
user_agent = context.request_handler.request.headers['User-Agent']
if user_agent is None:
user_agent = context.config.HTTP_LOADER_DEFAULT_USER_AGENT
url = normalize_url_func(url)
req = tornado.httpclient.HTTPRequest(
url=encode(url),
connect_timeout=context.config.HTTP_LOADER_CONNECT_TIMEOUT,
request_timeout=context.config.HTTP_LOADER_REQUEST_TIMEOUT,
follow_redirects=context.config.HTTP_LOADER_FOLLOW_REDIRECTS,
max_redirects=context.config.HTTP_LOADER_MAX_REDIRECTS,
user_agent=user_agent,
proxy_host=encode(context.config.HTTP_LOADER_PROXY_HOST),
proxy_port=context.config.HTTP_LOADER_PROXY_PORT,
proxy_username=encode(context.config.HTTP_LOADER_PROXY_USERNAME),
proxy_password=encode(context.config.HTTP_LOADER_PROXY_PASSWORD),
ca_certs=encode(context.config.HTTP_LOADER_CA_CERTS),
client_key=encode(context.config.HTTP_LOADER_CLIENT_KEY),
client_cert=encode(context.config.HTTP_LOADER_CLIENT_CERT)
)
client.fetch(req, callback=partial(return_contents, url=url, callback=callback, context=context))
def encode(string):
return None if string is None else string.encode('ascii')
| mit | -5,425,623,911,141,380,000 | 33.320313 | 129 | 0.688595 | false |
Strangemother/python-state-machine | scratch/machine_4/integration.py | 1 | 4183 | from tools import color_print as cl
class ConditionIntegrate(object):
def read_node(self, node):
'''
Read the conditions of a node.
'''
if hasattr(node, 'conditions') is False:
return
cnds = node.conditions()
# cl('yellow', 'get conditions for node', node)
self.integrate_conditions(cnds, node)
def integrate_conditions(self, conditions, node):
'''
Implement a list of conditions against one node.
'''
for c in conditions:
self.integrate_condition(c, node)
def integrate_condition(self, cond, node):
'''
Integrate the conditions into the condition runner
'''
if hasattr(self, 'condition_keys') is False:
setattr(self, 'condition_keys', {})
if hasattr(self, 'condition_nodes') is False:
setattr(self, 'condition_nodes', {})
names = self.get_integration_names(node, cond)
# cl('yellow', 'integrate conditions', node, cond, names)
self.append_with_names(names, cond)
# node, condition assications
ck = self.condition_keys
sc = str(cond)
if (sc in ck) is False:
ck[sc] = []
ck[sc].append(node.get_name())
def get_integration_names(self, node, condition):
node_name = node.get_name()
names = (node_name, str(condition), )
return names
def run_conditions(self, conditions, node, value, field):
# pprint(self.conditions._names)
# cl('yellow', 'run conditions', conditions, node, field)
pairs = []
# fetch associated conditions.
# make the condition perform the compare
for cond in conditions:
# get associated nodes for the condition
node_names = self.condition_keys.get(str(cond)) or []
# loop and get associated condition
for nn in node_names:
s = '{0}-{1}'.format(nn, str(cond))
r = self.get(s) or []
f = [(self.nodes.get(nn), set(r),)]
# cl('yellow', 'found', f)
pairs.extend( f )
res = {}
for parent_nodes, _conditions in pairs:
for cnd in _conditions:
for pn in parent_nodes:
v = cnd.validate(pn, node, value, field)
n = '{0}-{1}'.format(pn.get_name(), str(cnd))
res[n]= v
# cl('blue', 'conditions', res)
return res
def find_conditions(self, node, field, value):
n = '{0}_{1}_{2}'.format(node.get_name(), field, value)
# print '+ find conditions on', n
cnds = self.get_conditions(node, field, value)
# cl('yellow', '-- Matches condition', cnds)
return cnds
def get_conditions(self, node, name, value=None):
'''
Get conditions based upon node and name
'''
node_name = node
cl('red', 'get condition', node, name, value)
cnds = self.conditions
if hasattr(node_name, 'get_name'):
node_name = node.get_name()
name1 = '{0}_{1}'.format(node_name, name)
match_names = (name1, )
# exact match string
if value is not None:
vcn = '{0}_{1}_{2}'.format(node_name, name, value)
match_names += (vcn,)
res = []
for _n in match_names:
res += self.get_conditions_by_name(_n) or []
# print 'found conditions', res
return set(res)
def get_conditions_by_name(self, name):
'''
return the conditions matching a name provided.
'''
cnds = self.conditions.get(name)
# print 'get_condition_by_name:', name, cnds
return cnds
    def condition_name(self, node, name, *args, **kw):
        '''
        Create a name for a condition string match from the
        values passed.
        The node is the original object receiving the change;
        name denotes the key changing. Returns the string built
        for the condition (the node name joined with the first
        positional argument).
        '''
n = node.get_name()
a = [n, args[0]]
s = '_'.join(a)
return s
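
# Usage sketch (an assumption: ConditionIntegrate is a mixin; the host class
# must provide self.conditions, self.nodes, self.get() and append_with_names(),
# none of which are defined in this module):
#
#   class Machine(ConditionIntegrate, OtherBases):
#       ...
#   machine.read_node(node)                   # harvest node.conditions()
#   machine.find_conditions(node, 'age', 42)  # conditions for a field change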
| mit | -4,974,785,414,804,036,000 | 31.176923 | 65 | 0.537413 | false |
hperala/kontuwikibot | pywikibot/families/wikipedia_family.py | 1 | 24229 | # -*- coding: utf-8 -*-
"""Family module for Wikipedia."""
from __future__ import unicode_literals
from pywikibot import family
__version__ = '$Id: 3f958b3aee3b7b6794546f2ee7f13757d36b0a30 $'
# The Wikimedia family that is known as Wikipedia, the Free Encyclopedia
class Family(family.WikimediaFamily):
"""Family module for Wikipedia."""
closed_wikis = [
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Afar_Wikipedia
'aa',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Choctaw_Wikipedia
'cho',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Hiri_Motu_Wikipedia
'ho',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Herero_Wikipedia
'hz',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Yi_Wikipedia
'ii',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kwanyama_Wikipedia
'kj',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kanuri_Wikipedia
'kr',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Marshallese_Wikipedia
'mh',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Moldovan_Wikipedia
'mo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Muscogee_Wikipedia
'mus',
]
removed_wikis = [
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Ndonga_Wikipedia
'ng',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Siberian_Wikipedia
'ru-sib',
# Klingon, locked in 2005, and moved to http://klingon.wikia.com/
'tlh',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Tokipona_Wikipedia
'tokipona',
]
def __init__(self):
"""Constructor."""
super(Family, self).__init__()
self.name = 'wikipedia'
self.languages_by_size = [
'en', 'sv', 'nl', 'de', 'fr', 'war', 'ru', 'ceb', 'it', 'es', 'vi',
'pl', 'ja', 'pt', 'zh', 'uk', 'ca', 'fa', 'no', 'sh', 'fi', 'ar',
'id', 'cs', 'sr', 'ro', 'ko', 'hu', 'ms', 'tr', 'min', 'eo', 'kk',
'eu', 'sk', 'da', 'bg', 'he', 'lt', 'hy', 'hr', 'sl', 'et', 'uz',
'gl', 'nn', 'vo', 'la', 'simple', 'el', 'hi', 'az', 'th', 'ka',
'ce', 'oc', 'be', 'mk', 'mg', 'new', 'ur', 'tt', 'ta', 'pms', 'cy',
'tl', 'lv', 'bs', 'te', 'be-tarask', 'br', 'ht', 'sq', 'jv', 'lb',
'mr', 'is', 'ml', 'zh-yue', 'bn', 'af', 'ba', 'ga', 'pnb', 'cv',
'fy', 'lmo', 'tg', 'sco', 'my', 'yo', 'an', 'ky', 'sw', 'io', 'ne',
'gu', 'scn', 'bpy', 'nds', 'ku', 'ast', 'qu', 'als', 'su', 'pa',
'kn', 'ckb', 'ia', 'mn', 'nap', 'bug', 'arz', 'bat-smg', 'wa',
'zh-min-nan', 'am', 'map-bms', 'gd', 'yi', 'mzn', 'si', 'fo', 'bar',
'vec', 'nah', 'sah', 'os', 'sa', 'roa-tara', 'li', 'hsb', 'or',
'pam', 'mrj', 'mhr', 'se', 'mi', 'ilo', 'hif', 'bcl', 'gan', 'rue',
'ps', 'glk', 'nds-nl', 'bo', 'vls', 'diq', 'fiu-vro', 'bh', 'xmf',
'tk', 'gv', 'sc', 'co', 'csb', 'hak', 'km', 'kv', 'vep', 'zea',
'crh', 'zh-classical', 'frr', 'eml', 'ay', 'stq', 'udm', 'wuu',
'nrm', 'kw', 'rm', 'szl', 'so', 'koi', 'as', 'lad', 'fur', 'mt',
'dv', 'gn', 'dsb', 'ie', 'pcd', 'sd', 'lij', 'cbk-zam', 'cdo',
'ksh', 'ext', 'mwl', 'gag', 'ang', 'ug', 'ace', 'pi', 'pag', 'nv',
'lez', 'frp', 'sn', 'kab', 'ln', 'myv', 'pfl', 'xal', 'krc', 'haw',
'rw', 'pdc', 'kaa', 'to', 'kl', 'arc', 'nov', 'kbd', 'av', 'bxr',
'lo', 'bjn', 'ha', 'tet', 'tpi', 'na', 'pap', 'lbe', 'jbo', 'ty',
'mdf', 'roa-rup', 'wo', 'tyv', 'ig', 'srn', 'nso', 'kg', 'ab',
'ltg', 'zu', 'om', 'za', 'chy', 'cu', 'rmy', 'tw', 'tn', 'chr',
'mai', 'pih', 'got', 'xh', 'bi', 'sm', 'ss', 'rn', 'ki', 'pnt',
'bm', 'iu', 'ee', 'lg', 'ts', 'fj', 'ak', 'ik', 'st', 'sg', 'ff',
'dz', 'ny', 'ch', 'ti', 've', 'ks', 'tum', 'cr', 'gom', 'lrc',
'azb',
]
# Sites we want to edit but not count as real languages
self.test_codes = ['test', 'test2']
self.langs = dict((lang, '%s.wikipedia.org' % lang) for lang in
self.languages_by_size + self.test_codes)
self.category_redirect_templates = {
'_default': (),
'ar': (u'تحويل تصنيف',
u'تحويلة تصنيف',
u'Category redirect',),
'arz': (u'تحويل تصنيف',),
'cs': (u'Zastaralá kategorie',),
'da': (u'Kategoriomdirigering',),
'en': (u'Category redirect',),
'es': (u'Categoría redirigida',),
'eu': (u'Kategoria redirect',),
'fa': (u'رده بهتر',
u'انتقال رده',),
'fr': (u'Redirection de catégorie',),
'gv': (u'Aastiurey ronney',),
'hi': (u'श्रेणीअनुप्रेषित',
u'Categoryredirect',),
'hu': (u'Kat-redir',
u'Katredir',
u'Kat-redirekt',),
'id': (u'Alih kategori',
u'Alihkategori',),
'ja': (u'Category redirect',),
'ko': (u'분류 넘겨주기',),
'mk': (u'Премести категорија',),
'ml': (u'Category redirect',),
'ms': (u'Pengalihan kategori',
u'Categoryredirect',
u'Category redirect',),
'mt': (u'Redirect kategorija',),
'no': (u'Category redirect',
u'Kategoriomdirigering',
u'Kategori-omdirigering',),
'pl': (u'Przekierowanie kategorii',
u'Category redirect',),
'pt': (u'Redirecionamento de categoria',
u'Redircat',
u'Redirect-categoria',),
'ro': (u'Redirect categorie',),
'ru': (u'Переименованная категория',
u'Categoryredirect',
u'CategoryRedirect',
u'Category redirect',
u'Catredirect',),
'simple': (u'Category redirect',
u'Categoryredirect',
u'Catredirect',),
'sh': (u'Prekat',
u'Preusmeri kategoriju',
u'Preusmjeri kategoriju',
u'Prekategorizuj',
u'Catred',
u'Catredirect',
u'Category redirect'),
'sl': (u'Category redirect',),
'sq': (u'Kategori e zhvendosur',
u'Category redirect',),
'sv': (u'Kategoriomdirigering',
u'Omdirigering kategori',),
'tl': (u'Category redirect',),
'tr': (u'Kategori yönlendirme',
u'Kat redir',),
'uk': (u'Categoryredirect',),
'vi': (u'Đổi hướng thể loại',
u'Thể loại đổi hướng',
u'Chuyển hướng thể loại',
u'Categoryredirect',
u'Category redirect',
u'Catredirect',),
'yi': (u'קאטעגאריע אריבערפירן',),
'zh': (u'分类重定向',
u'Cr',
u'CR',
u'Cat-redirect',),
'zh-yue': (u'Category redirect',
u'分類彈去',
u'分類跳轉',),
}
self.disambcatname = {
'af': u'dubbelsinnig',
'als': u'Begriffsklärung',
'ang': u'Scīrung',
'ast': u'Dixebra',
'ar': u'صفحات توضيح',
'be': u'Disambig',
'be-tarask': u'Вікіпэдыя:Неадназначнасьці',
'bg': u'Пояснителни страници',
'ca': u'Pàgines de desambiguació',
'cbk-zam': u'Desambiguo',
'cs': u'Rozcestníky',
'cy': u'Gwahaniaethu',
'da': u'Flertydig',
'de': u'Begriffsklärung',
'el': u'Αποσαφήνιση',
'en': u'All disambiguation pages',
'eo': u'Apartigiloj',
'es': u'Desambiguación',
'et': u'Täpsustusleheküljed',
'eu': u'Argipen orriak',
'fa': u'صفحههای ابهامزدایی',
'fi': u'Täsmennyssivut',
'fo': u'Fleiri týdningar',
'fr': u'Homonymie',
'fy': u'Trochferwiisside',
'ga': u'Idirdhealáin',
'gl': u'Homónimos',
'he': u'פירושונים',
'hu': u'Egyértelműsítő lapok',
'ia': u'Disambiguation',
'id': u'Disambiguasi',
'io': u'Homonimi',
'is': u'Aðgreiningarsíður',
'it': u'Disambigua',
'ja': u'曖昧さ回避',
'ka': u'მრავალმნიშვნელოვანი',
'kw': u'Folennow klerheans',
'ko': u'동음이의어 문서',
'ku': u'Rûpelên cudakirinê',
'krc': u'Кёб магъаналы терминле',
'ksh': u'Woot met mieh wi ëijnem Senn',
'la': u'Discretiva',
'lb': u'Homonymie',
'li': u'Verdudelikingspazjena',
'ln': u'Bokokani',
'lt': u'Nuorodiniai straipsniai',
'ms': u'Nyahkekaburan',
'mt': u'Diżambigwazzjoni',
'nds': u'Mehrdüdig Begreep',
'nds-nl': u'Wikipedie:Deurverwiespagina',
'nl': u'Wikipedia:Doorverwijspagina',
'nn': u'Fleirtydingssider',
'no': u'Pekere',
'pl': u'Strony ujednoznaczniające',
'pt': u'Desambiguação',
'ro': u'Dezambiguizare',
'ru': u'Многозначные термины',
'scn': u'Disambigua',
'sk': u'Rozlišovacie stránky',
'sl': u'Razločitev',
'sq': u'Kthjellime',
'sr': u'Вишезначна одредница',
'su': u'Disambiguasi',
'sv': u'Förgreningssider',
'szl': u'Zajty ujydnoznačńajůnce',
'th': u'การแก้ความกำกวม',
'tl': u'Paglilinaw',
'tr': u'Anlam ayrım',
'uk': u'Багатозначні геопункти',
'vi': u'Trang định hướng',
'vo': u'Telplänovapads',
'wa': u'Omonimeye',
'zea': u'Wikipedia:Deurverwiespagina',
'zh': u'消歧义',
'zh-min-nan': u'Khu-pia̍t-ia̍h',
}
# families that redirect their interlanguage links here.
self.interwiki_forwarded_from = [
'commons',
'incubator',
'meta',
'species',
'strategy',
'test',
]
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = [
'ab', 'ace', 'af', 'ak', 'als', 'am', 'an', 'ang', 'ar', 'arc',
'arz', 'as', 'ast', 'av', 'ay', 'az', 'ba', 'bar', 'bat-smg', 'bcl',
'be', 'be-tarask', 'bg', 'bh', 'bi', 'bjn', 'bm', 'bo', 'bpy', 'bug',
'bxr', 'ca', 'cbk-zam', 'cdo', 'ce', 'ceb', 'ch', 'chr', 'chy',
'ckb', 'co', 'cr', 'crh', 'csb', 'cu', 'cv', 'cy', 'da', 'diq',
'dsb', 'dz', 'ee', 'el', 'eml', 'en', 'eo', 'et', 'eu', 'ext', 'fa',
'ff', 'fi', 'fj', 'fo', 'frp', 'frr', 'fur', 'ga', 'gag', 'gan',
'gd', 'glk', 'gn', 'got', 'gu', 'gv', 'ha', 'hak', 'haw', 'he',
'hi', 'hif', 'hr', 'hsb', 'ht', 'hu', 'hy', 'ia', 'ie', 'ig', 'ik',
'ilo', 'io', 'iu', 'ja', 'jbo', 'jv', 'ka', 'kaa', 'kab', 'kdb',
'kg', 'ki', 'kk', 'kl', 'km', 'kn', 'ko', 'koi', 'krc', 'ks', 'ku',
'kv', 'kw', 'ky', 'la', 'lad', 'lb', 'lbe', 'lez', 'lg', 'li',
'lij', 'lmo', 'ln', 'lo', 'lt', 'ltg', 'lv', 'map-bms', 'mdf', 'mg',
'mhr', 'mi', 'mk', 'ml', 'mn', 'mrj', 'ms', 'mwl', 'my', 'myv',
'mzn', 'na', 'nah', 'nap', 'nds-nl', 'ne', 'new', 'nl', 'no', 'nov',
'nrm', 'nso', 'nv', 'ny', 'oc', 'om', 'or', 'os', 'pa', 'pag',
'pam', 'pap', 'pdc', 'pfl', 'pi', 'pih', 'pms', 'pnb', 'pnt', 'ps',
'qu', 'rm', 'rmy', 'rn', 'roa-rup', 'roa-tara', 'ru', 'rue', 'rw',
'sa', 'sah', 'sc', 'scn', 'sco', 'sd', 'se', 'sg', 'sh', 'si',
'simple', 'sk', 'sm', 'sn', 'so', 'srn', 'ss', 'st', 'stq', 'su',
'sv', 'sw', 'szl', 'ta', 'te', 'tet', 'tg', 'th', 'ti', 'tk', 'tl',
'tn', 'to', 'tpi', 'tr', 'ts', 'tt', 'tum', 'tw', 'ty', 'udm', 'ug',
'uz', 've', 'vec', 'vep', 'vls', 'vo', 'wa', 'war', 'wo', 'wuu',
'xal', 'xh', 'yi', 'yo', 'za', 'zea', 'zh', 'zh-classical',
'zh-min-nan', 'zh-yue', 'zu',
]
# On most Wikipedias page names must start with a capital letter,
# but some languages don't use this.
self.nocapitalize = ['jbo']
# Which languages have a special order for putting interlanguage links,
# and what order is it? If a language is not in interwiki_putfirst,
# alphabetical order on language code is used. For languages that are in
# interwiki_putfirst, interwiki_putfirst is checked first, and
# languages are put in the order given there. All other languages are
# put after those, in code-alphabetical order.
self.alphabetic_sr = [
'ace', 'kbd', 'af', 'ak', 'als', 'am', 'ang', 'ab', 'ar', 'an',
'arc', 'roa-rup', 'frp', 'arz', 'as', 'ast', 'gn', 'av', 'ay', 'az',
'bjn', 'id', 'ms', 'bg', 'bm', 'zh-min-nan', 'nan', 'map-bms', 'jv',
'su', 'ba', 'be', 'be-tarask', 'bh', 'bcl', 'bi', 'bn', 'bo', 'bar',
'bs', 'bpy', 'br', 'bug', 'bxr', 'ca', 'ceb', 'ch', 'cbk-zam', 'sn',
'tum', 'ny', 'cho', 'chr', 'co', 'cy', 'cv', 'cs', 'da', 'dk',
'pdc', 'de', 'nv', 'dsb', 'na', 'dv', 'dz', 'mh', 'et', 'el', 'eml',
'en', 'myv', 'es', 'eo', 'ext', 'eu', 'ee', 'fa', 'hif', 'fo', 'fr',
'fy', 'ff', 'fur', 'ga', 'gv', 'sm', 'gag', 'gd', 'gl', 'gan', 'ki',
'glk', 'got', 'gu', 'ha', 'hak', 'xal', 'haw', 'he', 'hi', 'ho',
'hsb', 'hr', 'hy', 'io', 'ig', 'ii', 'ilo', 'ia', 'ie', 'iu', 'ik',
'os', 'xh', 'zu', 'is', 'it', 'ja', 'ka', 'kl', 'kr', 'pam', 'krc',
'csb', 'kk', 'kw', 'rw', 'ky', 'mrj', 'rn', 'sw', 'km', 'kn', 'ko',
'kv', 'kg', 'ht', 'ks', 'ku', 'kj', 'lad', 'lbe', 'la', 'ltg', 'lv',
'to', 'lb', 'lez', 'lt', 'lij', 'li', 'ln', 'lo', 'jbo', 'lg',
'lmo', 'hu', 'mk', 'mg', 'mt', 'mi', 'min', 'cdo', 'mwl', 'ml',
'mdf', 'mo', 'mn', 'mr', 'mus', 'my', 'mzn', 'nah', 'fj', 'ne',
'nl', 'nds-nl', 'cr', 'new', 'nap', 'ce', 'frr', 'pih', 'no', 'nb',
'nn', 'nrm', 'nov', 'oc', 'mhr', 'or', 'om', 'ng', 'hz', 'uz', 'pa',
'pfl', 'pag', 'pap', 'koi', 'pi', 'pcd', 'pms', 'nds', 'pnb', 'pl',
'pt', 'pnt', 'ps', 'aa', 'kaa', 'crh', 'ty', 'ksh', 'ro', 'rmy',
'rm', 'qu', 'ru', 'rue', 'sa', 'sah', 'se', 'sg', 'sc', 'sco', 'sd',
'stq', 'st', 'nso', 'tn', 'sq', 'si', 'scn', 'simple', 'ss', 'sk',
'sl', 'cu', 'szl', 'so', 'ckb', 'srn', 'sr', 'sh', 'fi', 'sv', 'ta',
'shi', 'tl', 'kab', 'roa-tara', 'tt', 'te', 'tet', 'th', 'ti', 'vi',
'tg', 'tokipona', 'tp', 'tpi', 'chy', 've', 'tr', 'tk', 'tw', 'tyv',
'udm', 'uk', 'ur', 'ug', 'za', 'vec', 'vep', 'vo', 'fiu-vro', 'wa',
'vls', 'war', 'wo', 'wuu', 'ts', 'xmf', 'yi', 'yo', 'diq', 'zea',
'zh', 'zh-tw', 'zh-cn', 'zh-classical', 'zh-yue', 'bat-smg',
]
self.interwiki_putfirst = {
'be-tarask': self.alphabetic,
'en': self.alphabetic,
'et': self.alphabetic_revised,
'fi': self.alphabetic_revised,
'fiu-vro': self.alphabetic_revised,
'fy': self.fyinterwiki,
'he': ['en'],
'hu': ['en'],
'lb': self.alphabetic,
'mk': self.alphabetic,
'ms': self.alphabetic_revised,
'nds': ['nds-nl'],
'nds-nl': ['nds'],
'nn': ['no', 'sv', 'da'] + self.alphabetic,
'no': self.alphabetic,
'nv': ['en', 'es'] + self.alphabetic,
'pdc': ['de', 'en'],
'pl': self.alphabetic,
'simple': self.alphabetic,
'sr': self.alphabetic_sr,
'sv': self.alphabetic,
'te': ['en', 'hi', 'kn', 'ta', 'ml'],
'ur': ['ar', 'fa', 'en'] + self.alphabetic,
'vi': self.alphabetic_revised,
'yi': ['en', 'he', 'de']
}
# Languages that used to be coded in iso-8859-1
self.latin1old = [
'de', 'en', 'et', 'es', 'ia', 'la', 'af', 'cs', 'fr', 'pt', 'sl',
'bs', 'fy', 'vi', 'lt', 'fi', 'it', 'no', 'simple', 'gl', 'eu',
'nds', 'co', 'mi', 'mr', 'id', 'lv', 'sw', 'tt', 'uk', 'vo', 'ga',
'na', 'es', 'nl', 'da', 'dk', 'sv', 'test']
self.crossnamespace[0] = {
'_default': {
'pt': [102],
'als': [104],
'ar': [104],
'de': [4],
'en': [12],
'es': [104],
'fi': [4],
'fr': [104],
'hr': [102],
'lt': [104],
},
'km': {
'_default': [0, 4, 12],
},
# wrong wikipedia namespace alias
'mzn': {
'_default': [0, 4],
},
}
self.crossnamespace[1] = {
'_default': {
'pt': [103],
'als': [105],
'ar': [105],
'en': [13],
'es': [105],
'fi': [5],
'fr': [105],
'hr': [103],
'lt': [105],
},
}
self.crossnamespace[4] = {
'_default': {
'_default': [12],
},
'de': {
'_default': [0, 10, 12],
'el': [100, 12],
'es': [104, 12],
},
'fi': {
'_default': [0, 12]
},
'mzn': {
'_default': [0, 12]
},
}
self.crossnamespace[5] = {
'fi': {
'_default': [1]}
}
self.crossnamespace[12] = {
'_default': {
'_default': [4],
},
'en': {
'_default': [0, 4],
},
}
self.crossnamespace[13] = {
'en': {
'_default': [0],
},
}
self.crossnamespace[102] = {
'pt': {
'_default': [0],
'als': [0, 104],
'ar': [0, 104],
'es': [0, 104],
'fr': [0, 104],
'lt': [0, 104]
},
'hr': {
'_default': [0],
'als': [0, 104],
'ar': [0, 104],
'es': [0, 104],
'fr': [0, 104],
'lt': [0, 104]
},
}
self.crossnamespace[103] = {
'pt': {
'_default': [1],
'als': [1, 105],
'es': [1, 105],
'fr': [1, 105],
'lt': [1, 105]
},
'hr': {
'_default': [1],
'als': [1, 105],
'es': [1, 105],
'fr': [1, 105],
'lt': [1, 105]
},
}
self.crossnamespace[104] = {
'als': {
'_default': [0],
'pt': [0, 102],
'hr': [0, 102],
},
'ar': {
'_default': [0, 100],
'hr': [0, 102],
'pt': [0, 102],
},
'es': {
'_default': [0],
'pt': [0, 102],
'hr': [0, 102],
},
'fr': {
'_default': [0],
'pt': [0, 102],
'hr': [0, 102],
},
'lt': {
'_default': [0],
'pt': [0, 102],
'hr': [0, 102],
},
}
self.crossnamespace[105] = {
'als': {
'_default': [1],
'pt': [0, 103],
'hr': [0, 103],
},
'ar': {
'_default': [1, 101],
},
'es': {
'_default': [1],
'pt': [0, 103],
'hr': [0, 103],
},
'fr': {
'_default': [1],
'pt': [0, 103],
'hr': [0, 103],
},
'lt': {
'_default': [1],
'pt': [0, 103],
'hr': [0, 103],
},
}
def get_known_families(self, site):
"""Override the family interwiki prefixes for each site."""
# In Swedish Wikipedia 's:' is part of page title not a family
# prefix for 'wikisource'.
if site.language() == 'sv':
d = self.known_families.copy()
d.pop('s')
d['src'] = 'wikisource'
return d
else:
return self.known_families
def code2encodings(self, code):
"""Return a list of historical encodings for a specific site."""
# Historic compatibility
if code == 'pl':
return 'utf-8', 'iso8859-2'
if code == 'ru':
return 'utf-8', 'iso8859-5'
if code in self.latin1old:
return 'utf-8', 'iso-8859-1'
return self.code2encoding(code),
# Subpages for documentation.
# TODO: List is incomplete, to be completed for missing languages.
# TODO: Remove comments for appropriate pages
self.doc_subpages = {
'_default': ((u'/doc', ),
['ar', 'bn', 'cs', 'da', 'en', 'es',
'hu', 'id', 'ilo', 'ja', 'ms',
'pt', 'ro', 'ru', 'simple', 'vi', 'zh']
),
'ca': (u'/ús', ),
'de': (u'Doku', u'/Meta'),
'dsb': (u'/Dokumentacija', ),
'eu': (u'txantiloi dokumentazioa', u'/dok'),
'fa': (u'/doc', u'/توضیحات'),
# fi: no idea how to handle this type of subpage at :Metasivu:
'fi': ((), ),
'fr': (u'/documentation', ),
'hsb': (u'/Dokumentacija', ),
'it': (u'/Man', ),
'ka': (u'/ინფო', ),
'ko': (u'/설명문서', ),
'no': (u'/dok', ),
'nn': (u'/dok', ),
'pl': (u'/opis', ),
'sk': (u'/Dokumentácia', ),
'sv': (u'/dok', ),
'uk': (u'/Документація', ),
}
def shared_data_repository(self, code, transcluded=False):
"""Return the shared data repository for this site."""
if code in ['test', 'test2']:
return ('test', 'wikidata')
else:
return ('wikidata', 'wikidata')
| mit | 3,577,370,888,441,289,700 | 39.868739 | 105 | 0.394371 | false |
caio1982/capomastro | jenkins/utils.py | 1 | 4525 | from urlparse import urljoin
import xml.etree.ElementTree as ET
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.utils import timezone
from django.utils.text import slugify
PARAMETERS = ".//properties/hudson.model.ParametersDefinitionProperty/parameterDefinitions/"
def get_notifications_url(base, server):
"""
Returns the full URL for notifications given a base.
"""
url = urljoin(base, reverse("jenkins_notifications"))
return url + "?server=%d" % server.pk
def get_context_for_template(job, server):
"""
Returns a Context for the Job XML templating.
"""
defaults = DefaultSettings({"NOTIFICATION_HOST": "http://localhost"})
url = get_notifications_url(defaults.NOTIFICATION_HOST, server)
context_vars = {
"notifications_url": url,
"job": job,
"jobtype": job.jobtype,
}
return Context(context_vars)
def get_job_xml_for_upload(job, server):
"""
Return config_xml run through the template mechanism.
"""
template = Template(job.jobtype.config_xml)
context = get_context_for_template(job, server)
# We need to strip leading/trailing whitespace so that the <?xml> PI
# stays on the first line of the document.
job_xml = template.render(context).strip()
requestor = JenkinsParameter(
"REQUESTOR", "The username requesting the build", "")
job_xml = add_parameter_to_job(requestor, job_xml)
return job_xml
def generate_job_name(jobtype):
"""
Generates a "unique" id.
"""
return "%s_%s" % (slugify(jobtype.name), timezone.now().strftime("%s"))
class DefaultSettings(object):
"""
Allows easy configuration of default values for Django settings.
e.g. values = DefaultSettings({"NOTIFICATION_HOST": "http://example.com"})
values.NOTIFICATION_HOST # returns the value from the default django
settings, or the default if not provided in the settings.
"""
class _defaults(object):
pass
def __init__(self, defaults):
self.defaults = self._defaults()
for key, value in defaults.iteritems():
setattr(self.defaults, key, value)
def __getattr__(self, key):
return getattr(settings, key, getattr(self.defaults, key))
def get_value_or_none(self, key):
"""
Doesn't raise an AttributeError in the event that the key doesn't
exist.
"""
return getattr(settings, key, getattr(self.defaults, key, None))
def parse_parameters_from_job(body):
"""
Parses the supplied XML document and extracts all parameters, returns a
list of dictionaries with the details of the parameters extracted.
"""
result = []
root = ET.fromstring(body)
for param in root.findall(PARAMETERS):
item = {}
for param_element in param.findall("./"):
item[param_element.tag] = param_element.text
result.append(item)
return result
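# Sketch of the expected shape (element names depend on the job XML):
# for a job carrying the REQUESTOR parameter added above, this would
# return [{'name': 'REQUESTOR',
#          'description': 'The username requesting the build',
#          'defaultValue': ''}].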
class JenkinsParameter(object):
"""Represents a parameter for a Jenkins job."""
definition = "TextParameterDefinition"
def __init__(self, name, description, default):
self.name = name
self.description = description
self.default = default
@property
def type(self):
return "hudson.model.%s" % self.definition
def parameter_to_xml(param):
"""
Converts a JenkinsParameter to the XML element representation for a Jenkins
job parameter.
"""
element = ET.Element(param.type)
ET.SubElement(element, "name").text = param.name
ET.SubElement(element, "description").text = param.description
ET.SubElement(element, "defaultValue").text = param.default
return element
def add_parameter_to_job(param, job):
"""
Adds a JenkinsParameter to an existing job xml document, returns the job XML
as a string.
NOTE: This does nothing to check whether or not the parameter already
exists.
"""
root = ET.fromstring(job)
parameters_container = root.find(PARAMETERS[:-1])
if parameters_container is None:
parameters = root.find(".//hudson.model.ParametersDefinitionProperty")
if parameters is None:
parameters = ET.SubElement(root, "hudson.model.ParametersDefinitionProperty")
parameters_container = ET.SubElement(parameters, "parameterDefinitions")
parameters_container.append(parameter_to_xml(param))
return ET.tostring(root)
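# Minimal usage sketch (the input XML is illustrative, not a full
# Jenkins job config):
#
#   param = JenkinsParameter("BRANCH", "Branch to build", "master")
#   new_xml = add_parameter_to_job(param, "<project></project>")
#
# new_xml then contains a hudson.model.ParametersDefinitionProperty
# element holding a hudson.model.TextParameterDefinition for BRANCH.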
| mit | 3,326,860,643,878,144,500 | 29.993151 | 92 | 0.671381 | false |
koomik/CouchPotatoServer | couchpotato/core/plugins/log/main.py | 1 | 4216 | import os
import traceback
from couchpotato.api import addApiView
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.environment import Env
log = CPLog(__name__)
class Logging(Plugin):
def __init__(self):
addApiView('logging.get', self.get, docs = {
'desc': 'Get the full log file by number',
'params': {
'nr': {'desc': 'Number of the log to get.'}
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
'total': int, //Total log files available
}"""}
})
addApiView('logging.partial', self.partial, docs = {
'desc': 'Get a partial log',
'params': {
'type': {'desc': 'Type of log', 'type': 'string: all(default), error, info, debug'},
'lines': {'desc': 'Number of lines. Last to first. Default 30'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'log': string, //Log file
}"""}
})
addApiView('logging.clear', self.clear, docs = {
'desc': 'Remove all the log files'
})
addApiView('logging.log', self.log, docs = {
'desc': 'Log errors',
'params': {
'type': {'desc': 'Type of logging, default "error"'},
'**kwargs': {'type': 'object', 'desc': 'All other params will be printed in the log string.'},
}
})
def get(self, nr = 0, **kwargs):
nr = tryInt(nr)
current_path = None
total = 1
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
# Check to see if the log exists
if not os.path.isfile(path):
total = x - 1
break
# Set current path
if x == nr:
current_path = path
log_content = ''
if current_path:
f = open(current_path, 'r')
log_content = f.read()
return {
'success': True,
'log': toUnicode(log_content),
'total': total,
}
def partial(self, type = 'all', lines = 30, **kwargs):
total_lines = tryInt(lines)
log_lines = []
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
# Check see if the log exists
if not os.path.isfile(path):
break
f = open(path, 'r')
reversed_lines = toUnicode(f.read()).split('\x1b[0m\n')
reversed_lines.reverse()
brk = False
for line in reversed_lines:
if type == 'all' or '%s ' % type.upper() in line:
log_lines.append(line)
if len(log_lines) >= total_lines:
brk = True
break
if brk:
break
log_lines.reverse()
return {
'success': True,
'log': '\x1b[0m\n'.join(log_lines),
}
def clear(self, **kwargs):
for x in range(0, 50):
path = '%s%s' % (Env.get('log_path'), '.%s' % x if x > 0 else '')
if not os.path.isfile(path):
continue
try:
# Create empty file for current logging
if x == 0:
self.createFile(path, '')
else:
os.remove(path)
except:
log.error('Couldn\'t delete file "%s": %s', (path, traceback.format_exc()))
return {
'success': True
}
def log(self, type = 'error', **kwargs):
try:
log_message = 'API log: %s' % kwargs
try:
getattr(log, type)(log_message)
except:
log.error(log_message)
except:
log.error('Couldn\'t log via API: %s', kwargs)
return {
'success': True
}
| gpl-3.0 | -6,851,123,021,384,806,000 | 26.376623 | 110 | 0.463947 | false |
WilsonWangTHU/neural_graph_evolution | graph_util/structure_mapper.py | 1 | 7819 | #!/usr/bin/env python2
# -----------------------------------------------------------------------------
# @author:
# Tingwu Wang, Jun 23rd, 2017
# -----------------------------------------------------------------------------
import init_path
from util import logger
from . import mujoco_parser
import numpy as np
_BASE_DIR = init_path.get_base_dir()
def map_output(transfer_env, i_value, added_constant, gnn_option_list):
'''
@brief:
i_value could be the logstd (1, num_action), policy_output/w
(64, num_action), policy_output/b (1, num_action)
'''
assert len(gnn_option_list) == 4
i_value = np.transpose(i_value)  # move the num_action axis to the front
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
if len(i_value.shape) > 1:
o_value = np.zeros([len(oenv_info['output_list']), i_value.shape[1]])
else:
# the b matrix
o_value = np.zeros([len(oenv_info['output_list'])])
assert len(i_value) == len(ienv_info['output_list'])
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in enumerate(oenv_info['output_list']):
# get the name of the joint
node_name = oenv_info['tree'][output_node_id]['name']
# is the node already present in the input environment?
if node_name in ienv_node_name_list:
if ienv_node_name_list.index(node_name) not in \
ienv_info['output_list']:
logger.warning('Missing joint: {}'.format(node_name))
continue
o_value[output_id] = i_value[
ienv_info['output_list'].index(
ienv_node_name_list.index(node_name)
)
]
else:
# the name format: "@type_@name_@number", e.g.: joint_leg_1
assert len(node_name.split('_')) == 3
# find all the repetitive node and calculate the average
repetitive_struct_node_list = [
ienv_node_name_list.index(name)
for name in ienv_node_name_list
if node_name.split('_')[1] == name.split('_')[1]
]
num_repetitive_nodes = float(len(repetitive_struct_node_list))
assert len(repetitive_struct_node_list) >= 1
for i_node_id in repetitive_struct_node_list:
o_value[output_id] += i_value[
ienv_info['output_list'].index(i_node_id)
] / num_repetitive_nodes
return np.transpose(o_value) + added_constant
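# Usage sketch (the transfer_env name is illustrative): to map a policy
# weight matrix trained on one morphology onto a larger one, with
# gnn_option_list holding the four parser options used above:
#
#   o_w = map_output('CentipedeFour2CentipedeSix', i_w, 0.0, gnn_option_list)
#
# Joints shared by both morphologies are copied directly; joints that
# are new in the output morphology take the average over the input
# nodes that share their structural name.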
def map_input(transfer_env, i_value, added_constant, gnn_option_list):
assert len(gnn_option_list) == 4
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
o_value = np.zeros([oenv_info['debug_info']['ob_size'], i_value.shape[1]])
assert len(i_value) == ienv_info['debug_info']['ob_size']
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in oenv_info['input_dict'].items():
# get the name of the joint
node_name = oenv_info['tree'][output_id]['name']
# is the node already present in the input environment?
if node_name in ienv_node_name_list:
o_value[output_node_id] = i_value[
ienv_info['input_dict'][
ienv_node_name_list.index(node_name)
]
]
else:
continue
return o_value
def map_transfer_env_running_mean(ienv, oenv, running_mean_info,
observation_size,
gnn_node_option, root_connection_option,
gnn_output_option, gnn_embedding_option):
# parse the mujoco information
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
i_running_mean_info = running_mean_info
# seed the new running mean from the old one, scaled by start_coeff
start_coeff = 1
o_running_mean_info = {
'step': i_running_mean_info['step'] * start_coeff,
'mean': np.zeros([observation_size]),
'variance': np.zeros([observation_size]),
'square_sum': np.zeros([observation_size]),
'sum': np.zeros([observation_size])
}
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for node, oenv_digit in oenv_info['input_dict'].items():
node_name = oenv_info['tree'][node]['name']
# is the node already present in the input environment?
if node_name in ienv_node_name_list:
ienv_digit = ienv_info['input_dict'][
ienv_node_name_list.index(node_name)
]
assert len(ienv_digit) == len(oenv_digit)
# assign the value!
for key in ['square_sum', 'sum']:
o_running_mean_info[key][oenv_digit] = \
i_running_mean_info[key][ienv_digit] * start_coeff
for key in ['mean', 'variance']:
o_running_mean_info[key][oenv_digit] = \
i_running_mean_info[key][ienv_digit]
else:
# the name format: "@type_@name_@number", e.g.: joint_leg_1
assert len(node_name.split('_')) == 3
# find all the repetitive node and calculate the average
repetitive_struct_node_list = [
ienv_node_name_list.index(name)
for name in ienv_node_name_list
if node_name.split('_')[1] == name.split('_')[1]
]
assert len(repetitive_struct_node_list) >= 1
num_repetitive_nodes = float(len(repetitive_struct_node_list))
for i_node_id in repetitive_struct_node_list:
ienv_digit = ienv_info['input_dict'][i_node_id]
assert len(ienv_digit) == len(oenv_digit)
# assign the value!
for key in ['square_sum', 'sum']:
o_running_mean_info[key][oenv_digit] += \
i_running_mean_info[key][ienv_digit] * \
start_coeff / num_repetitive_nodes
for key in ['mean', 'variance']:
o_running_mean_info[key][oenv_digit] += \
i_running_mean_info[key][ienv_digit] / \
num_repetitive_nodes
return o_running_mean_info
| mit | 7,428,175,684,621,202,000 | 40.152632 | 79 | 0.553012 | false |
vitordeatorreao/amproj | amproj/datasets/dataset.py | 1 | 3520 | """Base class for a memory representation of any dataset"""
class Dataset:
"""Represents a dataset read to memory"""
def __init__(self, feature_names=[]):
"""Initializes a new instance of Dataset
Parameters
----------
feature_names : list<str>, optional
List of names of the features present in this dataset.
"""
if type(feature_names) != list:
raise TypeError(
"The `feature_names` argument must be of type list")
self.features = [str(name) for name in feature_names]
self.data = []
def add_datapoint(self, datapoint):
"""Adds a datapoint to the dataset
Parameters
----------
datapoint : list
A list containing the feature values.
"""
point = {} # datapoint to be built and inserted in the dataset
if len(self.features) == 0: # in case there are no feature names
if len(self.data) > 0 and len(self.data[0]) != len(datapoint):
raise TypeError("The new datapoint must be of the same size " +
"as the other datapoints. The new datapoint " +
"has size " + str(len(datapoint)) + ", but " +
"the other datapoints have size " +
str(len(self.data[0])) + ".")
i = 0
for value in datapoint:
point["feature" + str(i)] = self.__tryparse__(value)
i += 1
self.data.append(point)
return
if len(datapoint) != len(self.features):
raise TypeError("The datapoint must be of the same size as " +
"the features list. The features list has size " +
str(len(self.features)) + " and the datapoint " +
"has size " + str(len(datapoint)) + ". The " +
"datapoint is " + str(datapoint))
i = 0
for feature_name in self.features:
point[feature_name] = self.__tryparse__(datapoint[i])
i += 1
self.data.append(point) # actually adds the datapoint to the set
def __len__(self):
"""Returns the length of this dataset"""
return len(self.data)
def __iter__(self):
"""Iterates through the objects in this dataset"""
return iter(self.data)
def __getitem__(self, key):
"""Gets the dataset at the specified index"""
if type(key) != int:
raise TypeError("The index must be an integer, instead got " + key)
return self.data[key]
def __tryparse__(self, value):
"""Parses the value into int, float or string
Parameters
----------
value : str
A value to be parsed.
Returns
-------
val : int, float or str
The value after being parsed to its correct type.
Notes
-----
The value will be parsed in a try and error way. First, we try to cast
it to int. If that fails, we try to cast it to float. And if that fails
as well, we simply return it as string.
"""
value = value.strip()
if type(value) != str:
return value
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
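# Short usage sketch (values are illustrative):
#
#   dataset = Dataset(["name", "age", "score"])
#   dataset.add_datapoint(["alice", "30", "4.5"])
#   dataset.add_datapoint(["bob", "25", "3.9"])
#   len(dataset)  # -> 2
#   dataset[0]    # -> {"name": "alice", "age": 30, "score": 4.5}
#
# Note that add_datapoint parses each value, so numeric strings come
# back as int or float.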
| gpl-2.0 | 126,932,584,548,431,800 | 34.555556 | 79 | 0.511932 | false |
lmorchard/badger | apps/socialconnect/views.py | 1 | 12314 | import urllib, urllib2
import cgi
import os
from django.conf import settings
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from django.utils.http import urlquote
from django.utils import simplejson as json
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib import messages
from oauthtwitter import OAuthApi
from oauth import oauth
import oauthtwitter
from pinax.apps.account.utils import get_default_redirect, user_display
from pinax.apps.account.views import login as account_login
from socialconnect.utils import Router, BaseView
from socialconnect.forms import OauthSignupForm
from socialconnect.models import UserOauthAssociation
TWITTER_CONSUMER_KEY = getattr(settings, 'TWITTER_CONSUMER_KEY', 'YOUR_KEY')
TWITTER_CONSUMER_SECRET = getattr(settings, 'TWITTER_CONSUMER_SECRET', 'YOUR_SECRET')
FACEBOOK_CONSUMER_KEY = getattr(settings, 'FACEBOOK_CONSUMER_KEY', 'YOUR_KEY')
FACEBOOK_CONSUMER_SECRET = getattr(settings, 'FACEBOOK_CONSUMER_SECRET', 'YOUR_SECRET')
class ManagementView(BaseView):
"""Connection management view, mainly for removing associations"""
urlname_pattern = 'socialconnect_manage_%s'
def do_associations(self, request):
v = self.require_login(request)
if v is not True: return v
if request.method == "POST":
a_id = request.POST.get('id', None)
try:
assoc = UserOauthAssociation.objects.get(
user = request.user, id = a_id)
messages.add_message(request, messages.SUCCESS,
ugettext("""
Successfully deleted connection to %(auth_type)s
screen name %(username)s.
""") % {
"auth_type": assoc.auth_type,
"username": assoc.username
}
)
assoc.delete()
except UserOauthAssociation.DoesNotExist:
pass
return HttpResponseRedirect(reverse(
self.urlname_pattern % 'associations'))
associations = UserOauthAssociation.objects.filter(user=request.user)
return self.render(request, 'associations.html', {
'associations': associations
})
class BaseAuthView(BaseView):
def do_signin(self, request):
"""Perform sign in via OAuth"""
request.session['socialconnect_mode'] = request.GET.get('mode', 'signin')
next = request.GET.get(REDIRECT_FIELD_NAME, '/')
if next:
request.session['redirect_to'] = next
return HttpResponseRedirect(self.get_signin_url(request))
def do_callback(self, request):
"""Handle response from OAuth permit/deny"""
# TODO: Handle OAuth denial!
mode = request.session.get('socialconnect_mode', None)
profile = self.get_profile_from_callback(request)
if not profile: return HttpResponse(status=400)
request.session[self.session_profile] = profile
success_url = get_default_redirect(request, REDIRECT_FIELD_NAME)
if not success_url or 'None' == success_url:
success_url = '/'
try:
# Try looking for an association to perform a login.
assoc = UserOauthAssociation.objects.filter(
auth_type=self.auth_type,
profile_id=profile['id'],
username=profile['username']
).get()
if 'connect' == mode:
messages.add_message(request, messages.ERROR,
ugettext("""This service is already connected to another
account!""")
)
return HttpResponseRedirect(reverse(
ManagementView().urlname_pattern % 'associations'))
else:
self.log_in_user(request, assoc.user)
return HttpResponseRedirect(success_url)
except UserOauthAssociation.DoesNotExist:
# No association found, so...
if not request.user.is_authenticated():
# If no login session, bounce to registration
return HttpResponseRedirect(reverse(
self.urlname_pattern % 'register'))
else:
# If there's a login session, create an association to the
# currently logged in user.
assoc = self.create_association(request, request.user, profile)
del request.session[self.session_profile]
if 'connect' == mode:
return HttpResponseRedirect(reverse(
ManagementView().urlname_pattern % 'associations'))
else:
return HttpResponseRedirect(success_url)
def get_registration_form_class(self, request):
return OauthSignupForm
def do_register(self, request):
"""Handle registration with association"""
# Ensure that the OAuth signin details are present in the session
profile = request.session.get(self.session_profile, None)
if not profile: return HttpResponse(status=400)
RegistrationForm = self.get_registration_form_class(request)
success_url = get_default_redirect(request, REDIRECT_FIELD_NAME)
if request.method != "POST":
# Pre-fill the form with suggested info based on the OAuth signin
form = RegistrationForm(initial = self.initial_from_profile(profile))
else:
form = RegistrationForm(request.POST)
if form.is_valid():
user = form.save(request=request)
assoc = self.create_association(request, user, profile)
self.log_in_user(request, user)
return HttpResponseRedirect(success_url)
return self.render(request, 'register.html', {
'form': form,
'auth_label': self.auth_label,
'signin_url': reverse(self.urlname_pattern % 'signin'),
"action": request.path,
})
def create_association(self, request, user, profile):
"""Create an association between this user and the given profile"""
assoc = UserOauthAssociation(
user=user,
auth_type=self.auth_type,
profile_id=profile['id'],
username=profile['username'],
access_token=profile['access_token']
)
assoc.save()
messages.add_message(request, messages.SUCCESS,
ugettext("""
Successfully associated %(user)s with %(auth_label)s
screen name %(username)s.
""") % {
"user": user_display(request.user),
"auth_label": self.auth_label,
"username": profile['username']
}
)
def suggest_nickname(self, nickname):
"Return a suggested nickname that has not yet been taken"
from django.contrib.auth.models import User
if not nickname:
return ''
original_nickname = nickname
suffix = None
while User.objects.filter(username = nickname).count():
if suffix is None:
suffix = 1
else:
suffix += 1
nickname = original_nickname + str(suffix)
return nickname
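# For example, if "alice" is already taken, suggest_nickname("alice")
# returns "alice1"; if that is taken too, "alice2", and so on.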
def log_in_user(self, request, user):
# Remember, openid might be None (after registration with none set)
from django.contrib.auth import login
# Nasty but necessary - annotate user and pretend it was the regular
# auth backend. This is needed so django.contrib.auth.get_user works:
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
class TwitterAuthView(BaseAuthView):
auth_type = "twitter"
auth_label = _("Twitter")
urlname_pattern = 'socialconnect_twitter_%s'
consumer_key = TWITTER_CONSUMER_KEY
consumer_secret = TWITTER_CONSUMER_SECRET
session_access_token = 'twitter_access_token'
session_profile = 'twitter_profile'
def get_signin_url(self, request):
twitter = OAuthApi(self.consumer_key, self.consumer_secret)
request_token = twitter.getRequestToken()
request.session['twitter_request_token'] = request_token.to_string()
return twitter.getSigninURL(request_token)
def get_profile_from_callback(self, request):
"""Extract the access token and profile details from OAuth callback"""
request_token = request.session.get('twitter_request_token', None)
if not request_token: return None
token = oauth.OAuthToken.from_string(request_token)
if token.key != request.GET.get('oauth_token', 'no-token'):
return HttpResponse(status=400)
twitter = OAuthApi(self.consumer_key, self.consumer_secret, token)
access_token = twitter.getAccessToken()
twitter = oauthtwitter.OAuthApi(self.consumer_key,
self.consumer_secret, access_token)
try:
profile = twitter.GetUserInfo()
except:
return None
return {
'access_token': access_token.to_string(),
'id': profile.id,
'username': profile.screen_name,
'fullname': profile.name,
'email': '',
}
def initial_from_profile(self, profile):
fullname = profile['fullname']
first_name, last_name = '', ''
if fullname:
bits = fullname.split()
first_name = bits[0]
if len(bits) > 1:
last_name = ' '.join(bits[1:])
return {
'username': self.suggest_nickname(profile.get('username','')),
'first_name': first_name,
'last_name': last_name,
'email': ''
}
class FacebookAuthView(BaseAuthView):
auth_type = "facebook"
auth_label = _("Facebook")
urlname_pattern = 'socialconnect_facebook_%s'
consumer_key = FACEBOOK_CONSUMER_KEY
consumer_secret = FACEBOOK_CONSUMER_SECRET
session_access_token = 'facebook_access_token'
session_profile = 'facebook_profile'
def get_signin_url(self, request):
args = {
'client_id': self.consumer_key,
'redirect_uri': request.build_absolute_uri(
reverse('socialconnect_facebook_callback')),
'scope': 'publish_stream,offline_access'
}
return ("https://graph.facebook.com/oauth/authorize?" +
urllib.urlencode(args))
def get_profile_from_callback(self, request):
code = request.GET.get('code', None)
args = {
'client_id': self.consumer_key,
'client_secret': self.consumer_secret,
'redirect_uri': request.build_absolute_uri(
reverse('socialconnect_facebook_callback')),
'code': code,
}
access_token_url = ()
response = cgi.parse_qs(urllib2.urlopen(
"https://graph.facebook.com/oauth/access_token?" +
urllib.urlencode(args)
).read())
access_token = response["access_token"][-1]
profile = json.load(urllib2.urlopen("https://graph.facebook.com/me?" +
urllib.urlencode(dict(access_token=access_token))))
return {
'access_token': access_token,
'id': profile['id'],
'username': os.path.basename(profile.get('link','')),
'fullname': profile.get('name', ''),
'first_name': profile.get('first_name', ''),
'last_name': profile.get('last_name', ''),
'email': '',
}
def initial_from_profile(self, profile):
return {
'username': self.suggest_nickname(profile.get('username','')),
'first_name': profile.get('first_name', ''),
'last_name': profile.get('last_name', ''),
'email': ''
}
| bsd-3-clause | 6,301,588,463,236,984,000 | 36.889231 | 87 | 0.598993 | false |
napalm-automation/napalm | test/ios/TestIOSDriver.py | 1 | 6582 | # Copyright 2015 Spotify AB. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for IOSDriver."""
import unittest
from napalm.ios import ios
from napalm.base.test.base import TestConfigNetworkDriver, TestGettersNetworkDriver
import re
class TestConfigIOSDriver(unittest.TestCase, TestConfigNetworkDriver):
"""Configuration Tests for IOSDriver.
Core file operations:
load_replace_candidate Tested
load_merge_candidate Tested
compare_config Tested
commit_config Tested
discard_config Tested
rollback Tested
Internal methods:
_enable_confirm Tested
_disable_confirm Tested
_gen_rollback_cfg Tested as part of rollback
_check_file_exists Tested
Misc methods:
open Tested
close Skipped
normalize_compare_config Tested (indirectly)
scp_file Tested
gen_full_path Tested
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
ip_addr = "127.0.0.1"
username = "vagrant"
password = "vagrant"
cls.vendor = "ios"
optional_args = {"port": 12204, "dest_file_system": "bootflash:"}
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
cls.device.open()
# Setup initial state
cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor)
cls.device.commit_config()
def test_ios_only_confirm(self):
"""Test _disable_confirm() and _enable_confirm().
_disable_confirm() changes router config so it doesn't prompt for confirmation
_enable_confirm() reenables this
"""
# Set initial device configuration
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
self.device.commit_config()
# Verify initial state
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
# Disable confirmation
self.device._disable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "file prompt quiet")
# Reenable confirmation
self.device._enable_confirm()
output = self.device.device.send_command("show run | inc file prompt")
output = output.strip()
self.assertEqual(output, "")
def test_ios_only_gen_full_path(self):
"""Test gen_full_path() method."""
output = self.device._gen_full_path(self.device.candidate_cfg)
self.assertEqual(output, self.device.dest_file_system + "/candidate_config.txt")
output = self.device._gen_full_path(self.device.rollback_cfg)
self.assertEqual(output, self.device.dest_file_system + "/rollback_config.txt")
output = self.device._gen_full_path(self.device.merge_cfg)
self.assertEqual(output, self.device.dest_file_system + "/merge_config.txt")
output = self.device._gen_full_path(
filename="running-config", file_system="system:"
)
self.assertEqual(output, "system:/running-config")
def test_ios_only_check_file_exists(self):
"""Test _check_file_exists() method."""
self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor)
valid_file = self.device._check_file_exists(
self.device.dest_file_system + "/candidate_config.txt"
)
self.assertTrue(valid_file)
invalid_file = self.device._check_file_exists(
self.device.dest_file_system + "/bogus_999.txt"
)
self.assertFalse(invalid_file)
class TestGetterIOSDriver(unittest.TestCase, TestGettersNetworkDriver):
"""Getters Tests for IOSDriver.
Get operations:
get_lldp_neighbors
get_facts
get_interfaces
get_bgp_neighbors
get_interfaces_counters
"""
@classmethod
def setUpClass(cls):
"""Executed when the class is instantiated."""
cls.mock = True
username = "vagrant"
ip_addr = "192.168.0.234"
password = "vagrant"
cls.vendor = "ios"
optional_args = {}
optional_args["dest_file_system"] = "flash:"
cls.device = ios.IOSDriver(
ip_addr, username, password, optional_args=optional_args
)
if cls.mock:
cls.device.device = FakeIOSDevice()
else:
cls.device.open()
def test_ios_only_bgp_time_conversion(self):
"""Verify time conversion static method."""
test_cases = {
"1w0d": 604800,
"00:14:23": 863,
"00:13:40": 820,
"00:00:21": 21,
"00:00:13": 13,
"00:00:49": 49,
"1d11h": 126000,
"1d17h": 147600,
"8w5d": 5270400,
"1y28w": 48470400,
"never": -1,
}
for bgp_time, result in test_cases.items():
self.assertEqual(self.device.bgp_time_conversion(bgp_time), result)
class FakeIOSDevice:
"""Class to fake a IOS Device."""
@staticmethod
def read_txt_file(filename):
"""Read a txt file and return its content."""
with open(filename) as data_file:
return data_file.read()
def send_command_expect(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
cmd = re.sub(r"[\[\]\*\^\+\s\|]", "_", command)
output = self.read_txt_file("ios/mock_data/{}.txt".format(cmd))
return str(output)
def send_command(self, command, **kwargs):
"""Fake execute a command in the device by just returning the content of a file."""
return self.send_command_expect(command)
if __name__ == "__main__":
unittest.main()
| apache-2.0 | -1,167,232,957,800,592,000 | 32.753846 | 91 | 0.614707 | false |
emulbreh/vacuous | vacuous/backends/dulwich/tasks.py | 1 | 1530 | from StringIO import StringIO
from celery.task import Task
from celery.task.sets import TaskSet, subtask
from dulwich.protocol import ReceivableProtocol
from dulwich.server import ReceivePackHandler
from vacuous.backends import load_backend
from vacuous.backends.dulwich.utils import WebBackend
from vacuous.tasks import SyncTask
class _ReceivePackHandler(ReceivePackHandler):
def _apply_pack(self, refs):
result = super(_ReceivePackHandler, self)._apply_pack(refs)
status = dict(result)
self._good_refs = []
for oldsha, newsha, ref in refs:
if status[ref] == 'ok':
self._good_refs.append((oldsha, newsha, ref))
return result
class ReceivePackTask(Task):
def run(self, flavor, repo_path, data):
backend = load_backend(flavor, repo_path, cache=False)
out = StringIO()
proto = ReceivableProtocol(StringIO(data).read, out.write)
handler = _ReceivePackHandler(WebBackend(), [backend], proto, stateless_rpc=True)
handler.handle()
sync_tasks = []
for oldrev, newrev, name in handler._good_refs:
if name.startswith('refs/heads/'):
branch = name[11:]
sync_tasks.append(subtask(SyncTask, args=[backend.flavor, backend.path, oldrev, newrev, branch]))
if sync_tasks:
taskset = TaskSet(tasks=sync_tasks)
taskset.apply_async().join()
return out.getvalue(), handler._good_refs
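# Sketch of how this task might be dispatched from a smart-HTTP push
# view (the view and the 'git' flavor value are hypothetical):
#
#   result = ReceivePackTask.delay('git', repo.path, request.raw_post_data)
#   out, good_refs = result.get()
#
# `out` is the pkt-line response to stream back to the git client and
# `good_refs` holds the (oldsha, newsha, ref) tuples that applied cleanly.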
| mit | -4,407,668,046,472,185,000 | 33.772727 | 113 | 0.640523 | false |
hashimmm/iiifoo | testutils/manifest_validator.py | 1 | 3963 | from testutils.presentation_api.implementations.manifest_factory.loader import \
ManifestReader
from iiifoo_utils import image_id_from_canvas_id
def validate(manifestjson, logger=None):
"""Validate a given manifest json object."""
mr = ManifestReader(manifestjson)
try:
r = mr.read()
js = r.toJSON()
except Exception as e:
if logger:
logger.exception(e)
print e
valid = False
else:
valid = True
print mr.get_warnings()
if logger:
logger.warn(mr.get_warnings())
return valid
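# Usage sketch (assumes manifest.json holds a IIIF Presentation API
# manifest):
#
#   manifest = json.load(open("manifest.json"))
#   assert validate(manifest)  # False means the ManifestReader raised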
def assert_equal(first, second):
assert first == second, \
"%s != %s" % (first, second)
def ensure_manifest_details_integrity(detailsobj, manifest_json, start=0):
sequences = manifest_json['sequences']
canvases = sequences[0]['canvases']
no_of_images = len(detailsobj['images'])
assert_equal(len(sequences), 1)
assert_equal(len(canvases), no_of_images + start)
for i in xrange(start, start+no_of_images):
assert_equal(canvases[i]['label'],
detailsobj['images'][i-start]['name'])
assert_equal(canvases[i]['width'],
detailsobj['images'][i-start]['width'])
assert_equal(canvases[i]['height'],
detailsobj['images'][i-start]['height'])
image_resource = canvases[i]['images'][0]['resource']
assert_equal(image_resource['service']['@id'],
detailsobj['images'][i-start]['path'])
assert_equal(image_resource['width'],
detailsobj['images'][i-start]['width'])
assert_equal(image_resource['height'],
detailsobj['images'][i-start]['height'])
def ensure_manifest_schema_conformance(manifest_json):
assert validate(manifest_json), \
"Manifest json: \n%s\n is invalid" % manifest_json
def check_updated_details(manifest_json, details):
sequences = manifest_json['sequences']
canvases = sequences[0]['canvases']
new_image_ids = [image['image_id'] for image in details['images']]
updated_canvases = [canvas for canvas in canvases
if image_id_from_canvas_id(canvas["@id"])
in new_image_ids]
updated_canvases = {image_id_from_canvas_id(canvas["@id"]): canvas
for canvas in updated_canvases}
assert_equal(manifest_json['label'], details['manifest_label'])
for image_id in new_image_ids:
canvas = updated_canvases[image_id]
image = [image for image in details['images']
if image['image_id'] == image_id][0]
assert_equal(canvas['label'], image['name'])
assert_equal(canvas['width'], image['width'])
assert_equal(canvas['height'], image['height'])
image_resource = canvas['images'][0]['resource']
assert_equal(image_resource['service']['@id'], image['path'])
assert_equal(image_resource['width'], image['width'])
assert_equal(image_resource['height'], image['height'])
def check_annotations_in_list(annotation_list, imageobj):
resources = annotation_list['resources']
relevant_resources = []
for resource in resources:
if image_id_from_canvas_id(resource['on']) == imageobj['image_id']:
relevant_resources.append(resource)
list_comments = [item['resource']['chars'] for item in resources
if item['motivation'] == "oa:commenting"]
list_transcriptions = [item['resource']['chars'] for item in resources
if item['resource']['@type'] == "cnt:ContentAsText"]
for comment in imageobj.get('comments', []):
assert comment['text'] in list_comments, \
"Comment %s not found" % comment['text']
for transcription in imageobj.get('transcriptions', []):
assert transcription['text'] in list_transcriptions, \
"Comment %s not found" % transcription['text']
| mit | 5,603,357,598,368,924,000 | 40.715789 | 80 | 0.607368 | false |
radicalbit/ambari | ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hive.py | 1 | 23158 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import glob
from urlparse import urlparse
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.libraries.functions.get_config import get_config
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.core.resources.service import ServiceConfig
from resource_management.core.resources.system import File, Execute, Directory
from resource_management.core.source import StaticFile, Template, DownloadSource, InlineTemplate
from resource_management.core.shell import as_user
from resource_management.libraries.functions.is_empty import is_empty
from resource_management.libraries.resources.xml_config import XmlConfig
from resource_management.libraries.functions.format import format
from resource_management.core.exceptions import Fail
from resource_management.core.shell import as_sudo
from resource_management.core.shell import quote_bash_args
from resource_management.core.logger import Logger
from resource_management.core import utils
from resource_management.libraries.functions.setup_atlas_hook import has_atlas_in_cluster, setup_atlas_hook
from resource_management.libraries.functions.security_commons import update_credential_provider_path
from ambari_commons.constants import SERVICE
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def hive(name=None):
import params
hive_client_conf_path = format("{stack_root}/current/{component_directory}/conf")
# Permissions 644 for conf dir (client) files, and 600 for conf.server
mode_identified = 0644 if params.hive_config_dir == hive_client_conf_path else 0600
Directory(params.hive_etc_dir_prefix,
mode=0755
)
# We should change configurations for client as well as for server.
# The reason is that stale-configs are service-level, not component-level.
Logger.info("Directories to fill with configs: %s" % str(params.hive_conf_dirs_list))
for conf_dir in params.hive_conf_dirs_list:
fill_conf_dir(conf_dir)
params.hive_site_config = update_credential_provider_path(params.hive_site_config,
'hive-site',
os.path.join(params.hive_conf_dir, 'hive-site.jceks'),
params.hive_user,
params.user_group
)
XmlConfig("hive-site.xml",
conf_dir=params.hive_config_dir,
configurations=params.hive_site_config,
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.hive_user,
group=params.user_group,
mode=mode_identified)
# Generate atlas-application.properties.xml file
if params.enable_atlas_hook:
atlas_hook_filepath = os.path.join(params.hive_config_dir, params.atlas_hook_filename)
setup_atlas_hook(SERVICE.HIVE, params.hive_atlas_application_properties, atlas_hook_filepath, params.hive_user, params.user_group)
File(format("{hive_config_dir}/hive-env.sh"),
owner=params.hive_user,
group=params.user_group,
content=InlineTemplate(params.hive_env_sh_template),
mode=mode_identified
)
# On some OSes this folder may not exist, so create it before pushing files into it
Directory(params.limits_conf_dir,
create_parents = True,
owner='root',
group='root'
)
File(os.path.join(params.limits_conf_dir, 'hive.conf'),
owner='root',
group='root',
mode=0644,
content=Template("hive.conf.j2")
)
if params.security_enabled:
File(os.path.join(params.hive_config_dir, 'zkmigrator_jaas.conf'),
owner=params.hive_user,
group=params.user_group,
content=Template("zkmigrator_jaas.conf.j2")
)
File(format("/usr/lib/ambari-agent/{check_db_connection_jar_name}"),
content = DownloadSource(format("{jdk_location}{check_db_connection_jar_name}")),
mode = 0644,
)
if name != "client":
setup_non_client()
if name == 'hiveserver2':
setup_hiveserver2()
if name == 'metastore':
setup_metastore()
def setup_hiveserver2():
import params
File(params.start_hiveserver2_path,
mode=0755,
content=Template(format('{start_hiveserver2_script}'))
)
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hiveserver2.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hiveserver2.properties.j2"),
mode=0600
)
XmlConfig("hiveserver2-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hiveserver2-site'],
configuration_attributes=params.config['configuration_attributes']['hiveserver2-site'],
owner=params.hive_user,
group=params.user_group,
mode=0600)
# copy tarball to HDFS feature not supported
if not (params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major)):
params.HdfsResource(params.webhcat_apps_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=0755
)
# Create webhcat dirs.
if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
params.HdfsResource(params.hcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=params.hcat_hdfs_user_mode
)
params.HdfsResource(params.webhcat_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.webhcat_user,
mode=params.webhcat_hdfs_user_mode
)
# ****** Begin Copy Tarballs ******
# *********************************
# if copy tarball to HDFS feature supported copy mapreduce.tar.gz and tez.tar.gz to HDFS
if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major):
copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs)
# Always copy pig.tar.gz and hive.tar.gz using the appropriate mode.
# These copies may use a custom source and destination location.
copy_to_hdfs("pig",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.pig_tar_source,
custom_dest_file=params.pig_tar_dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
copy_to_hdfs("hive",
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=params.hive_tar_source,
custom_dest_file=params.hive_tar_dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
wildcard_tarballs = ["sqoop", "hadoop_streaming"]
for tarball_name in wildcard_tarballs:
source_file_pattern = eval("params." + tarball_name + "_tar_source")
dest_dir = eval("params." + tarball_name + "_tar_dest_dir")
if source_file_pattern is None or dest_dir is None:
continue
source_files = glob.glob(source_file_pattern) if "*" in source_file_pattern else [source_file_pattern]
for source_file in source_files:
src_filename = os.path.basename(source_file)
dest_file = os.path.join(dest_dir, src_filename)
copy_to_hdfs(tarball_name,
params.user_group,
params.hdfs_user,
file_mode=params.tarballs_mode,
custom_source_file=source_file,
custom_dest_file=dest_file,
skip=params.sysprep_skip_copy_tarballs_hdfs)
# ******* End Copy Tarballs *******
# *********************************
# if warehouse directory is in DFS
if not params.whs_dir_protocol or params.whs_dir_protocol == urlparse(params.default_fs).scheme:
# Create Hive Metastore Warehouse Dir
params.HdfsResource(params.hive_apps_whs_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
group=params.user_group,
mode=params.hive_apps_whs_mode
)
else:
Logger.info(format("Not creating warehouse directory '{hive_apps_whs_dir}', as the location is not in DFS."))
# Create Hive User Dir
params.HdfsResource(params.hive_hdfs_user_dir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
mode=params.hive_hdfs_user_mode
)
if not is_empty(params.hive_exec_scratchdir) and not urlparse(params.hive_exec_scratchdir).path.startswith("/tmp"):
params.HdfsResource(params.hive_exec_scratchdir,
type="directory",
action="create_on_execute",
owner=params.hive_user,
group=params.hdfs_user,
mode=0777) # Hive expects this dir to be writeable by everyone as it is used as a temp dir
params.HdfsResource(None, action="execute")
def setup_non_client():
import params
Directory(params.hive_pid_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_log_dir,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
Directory(params.hive_var_lib,
create_parents = True,
cd_access='a',
owner=params.hive_user,
group=params.user_group,
mode=0755)
if params.hive_jdbc_target is not None and not os.path.exists(params.hive_jdbc_target):
jdbc_connector(params.hive_jdbc_target, params.hive_previous_jdbc_jar)
if params.hive2_jdbc_target is not None and not os.path.exists(params.hive2_jdbc_target):
jdbc_connector(params.hive2_jdbc_target, params.hive2_previous_jdbc_jar)
def setup_metastore():
import params
if params.hive_metastore_site_supported:
hivemetastore_site_config = get_config("hivemetastore-site")
if hivemetastore_site_config:
XmlConfig("hivemetastore-site.xml",
conf_dir=params.hive_server_conf_dir,
configurations=params.config['configurations']['hivemetastore-site'],
configuration_attributes=params.config['configuration_attributes']['hivemetastore-site'],
owner=params.hive_user,
group=params.user_group,
mode=0600)
File(os.path.join(params.hive_server_conf_dir, "hadoop-metrics2-hivemetastore.properties"),
owner=params.hive_user,
group=params.user_group,
content=Template("hadoop-metrics2-hivemetastore.properties.j2"),
mode=0600
)
File(params.start_metastore_path,
mode=0755,
content=StaticFile('startMetastore.sh')
)
if not is_empty(params.hive_exec_scratchdir):
dirPathStr = urlparse(params.hive_exec_scratchdir).path
pathComponents = dirPathStr.split("/")
if dirPathStr.startswith("/tmp") and len(pathComponents) > 2:
Directory (params.hive_exec_scratchdir,
owner = params.hive_user,
create_parents = True,
mode=0777)
def create_metastore_schema():
import params
create_schema_cmd = format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -initSchema "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose")
check_schema_created_cmd = as_user(format("export HIVE_CONF_DIR={hive_server_conf_dir} ; "
"{hive_schematool_bin}/schematool -info "
"-dbType {hive_metastore_db_type} "
"-userName {hive_metastore_user_name} "
"-passWord {hive_metastore_user_passwd!p} -verbose"), params.hive_user)
# HACK: when the password is quoted, as_user (which does its own quoting)
# prevents !p from hiding the password, so it is masked manually below.
quoted_hive_metastore_user_passwd = quote_bash_args(quote_bash_args(params.hive_metastore_user_passwd))
if quoted_hive_metastore_user_passwd[0] == "'" and quoted_hive_metastore_user_passwd[-1] == "'" \
or quoted_hive_metastore_user_passwd[0] == '"' and quoted_hive_metastore_user_passwd[-1] == '"':
quoted_hive_metastore_user_passwd = quoted_hive_metastore_user_passwd[1:-1]
Logger.sensitive_strings[repr(check_schema_created_cmd)] = repr(check_schema_created_cmd.replace(
format("-passWord {quoted_hive_metastore_user_passwd}"), "-passWord " + utils.PASSWORDS_HIDE_STRING))
Execute(create_schema_cmd,
not_if = check_schema_created_cmd,
user = params.hive_user
)
"""
Writes configuration files required by Hive.
"""
def fill_conf_dir(component_conf_dir):
import params
hive_client_conf_path = os.path.realpath(format("{stack_root}/current/{component_directory}/conf"))
component_conf_dir = os.path.realpath(component_conf_dir)
mode_identified_for_file = 0644 if component_conf_dir == hive_client_conf_path else 0600
mode_identified_for_dir = 0755 if component_conf_dir == hive_client_conf_path else 0700
Directory(component_conf_dir,
owner=params.hive_user,
group=params.user_group,
create_parents = True,
mode=mode_identified_for_dir
)
if 'mapred-site' in params.config['configurations']:
XmlConfig("mapred-site.xml",
conf_dir=component_conf_dir,
configurations=params.config['configurations']['mapred-site'],
configuration_attributes=params.config['configuration_attributes']['mapred-site'],
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file)
File(format("{component_conf_dir}/hive-default.xml.template"),
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file
)
File(format("{component_conf_dir}/hive-env.sh.template"),
owner=params.hive_user,
group=params.user_group,
mode=mode_identified_for_file
)
# Create hive-log4j.properties and hive-exec-log4j.properties
# in /etc/hive/conf and not in /etc/hive2/conf
if params.log4j_version == '1':
log4j_exec_filename = 'hive-exec-log4j.properties'
if (params.log4j_exec_props != None):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=InlineTemplate(params.log4j_exec_props)
)
elif (os.path.exists("{component_conf_dir}/{log4j_exec_filename}.template")):
File(format("{component_conf_dir}/{log4j_exec_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_exec_filename}.template"))
)
log4j_filename = 'hive-log4j.properties'
if (params.log4j_props != None):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=InlineTemplate(params.log4j_props)
)
elif (os.path.exists("{component_conf_dir}/{log4j_filename}.template")):
File(format("{component_conf_dir}/{log4j_filename}"),
mode=mode_identified_for_file,
group=params.user_group,
owner=params.hive_user,
content=StaticFile(format("{component_conf_dir}/{log4j_filename}.template"))
)
if params.parquet_logging_properties is not None:
File(format("{component_conf_dir}/parquet-logging.properties"),
mode = mode_identified_for_file,
group = params.user_group,
owner = params.hive_user,
content = params.parquet_logging_properties)
def jdbc_connector(target, hive_previous_jdbc_jar):
"""
Shared by Hive Batch, Hive Metastore, and Hive Interactive
:param target: Target of jdbc jar name, which could be for any of the components above.
"""
import params
if not params.jdbc_jar_name:
return
if params.hive_jdbc_driver in params.hive_jdbc_drivers_list and params.hive_use_existing_db:
environment = {
"no_proxy": format("{ambari_server_hostname}")
}
if hive_previous_jdbc_jar and os.path.isfile(hive_previous_jdbc_jar):
File(hive_previous_jdbc_jar, action='delete')
# TODO: remove once ranger_hive_plugin no longer provides the jdbc driver
if params.prepackaged_jdbc_name != params.jdbc_jar_name:
Execute(('rm', '-f', params.prepackaged_ojdbc_symlink),
path=["/bin", "/usr/bin/"],
sudo = True)
File(params.downloaded_custom_connector,
content = DownloadSource(params.driver_curl_source))
# it may be more correct to branch on the db type here
if params.sqla_db_used:
untar_sqla_type2_driver = ('tar', '-xvf', params.downloaded_custom_connector, '-C', params.tmp_dir)
Execute(untar_sqla_type2_driver, sudo = True)
Execute(format("yes | {sudo} cp {jars_path_in_archive} {hive_lib}"))
Directory(params.jdbc_libs_dir,
create_parents = True)
Execute(format("yes | {sudo} cp {libs_path_in_archive} {jdbc_libs_dir}"))
Execute(format("{sudo} chown -R {hive_user}:{user_group} {hive_lib}/*"))
else:
Execute(('cp', '--remove-destination', params.downloaded_custom_connector, target),
#creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the jdbc driver
path=["/bin", "/usr/bin/"],
sudo = True)
else:
#for default hive db (Mysql)
Execute(('cp', '--remove-destination', format('/usr/share/java/{jdbc_jar_name}'), target),
#creates=target, TODO: uncomment once ranger_hive_plugin no longer provides the jdbc driver
path=["/bin", "/usr/bin/"],
sudo=True
)
pass
File(target,
mode = 0644,
)
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def hive(name=None):
import params
XmlConfig("hive-site.xml",
conf_dir = params.hive_conf_dir,
configurations = params.config['configurations']['hive-site'],
owner=params.hive_user,
configuration_attributes=params.config['configuration_attributes']['hive-site']
)
if name in ["hiveserver2","metastore"]:
# Manually overriding service logon user & password set by the installation package
service_name = params.service_map[name]
ServiceConfig(service_name,
action="change_user",
username = params.hive_user,
password = Script.get_password(params.hive_user))
Execute(format("cmd /c hadoop fs -mkdir -p {hive_warehouse_dir}"), logoutput=True, user=params.hadoop_user)
if name == 'metastore':
if params.init_metastore_schema:
check_schema_created_cmd = format('cmd /c "{hive_bin}\\hive.cmd --service schematool -info '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}'
'&set EXITCODE=%ERRORLEVEL%&exit /B %EXITCODE%"', #cmd "feature", propagate the process exit code manually
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
try:
Execute(check_schema_created_cmd)
except Fail:
create_schema_cmd = format('cmd /c {hive_bin}\\hive.cmd --service schematool -initSchema '
'-dbType {hive_metastore_db_type} '
'-userName {hive_metastore_user_name} '
'-passWord {hive_metastore_user_passwd!p}',
hive_bin=params.hive_bin,
hive_metastore_db_type=params.hive_metastore_db_type,
hive_metastore_user_name=params.hive_metastore_user_name,
hive_metastore_user_passwd=params.hive_metastore_user_passwd)
Execute(create_schema_cmd,
user = params.hive_user,
logoutput=True
)
if name == "hiveserver2":
if params.hive_execution_engine == "tez":
# Init the tez app dir in hadoop
script_file = __file__.replace('/', os.sep)
cmd_file = os.path.normpath(os.path.join(os.path.dirname(script_file), "..", "files", "hiveTezSetup.cmd"))
Execute("cmd /c " + cmd_file, logoutput=True, user=params.hadoop_user)
| apache-2.0 | -4,470,074,220,140,164,000 | 42.205224 | 147 | 0.630883 | false |
seanchen/taiga-back | taiga/users/serializers.py | 1 | 5786 | # Copyright (C) 2014 Andrey Antukh <[email protected]>
# Copyright (C) 2014 Jesús Espino <[email protected]>
# Copyright (C) 2014 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from taiga.base.api import serializers
from taiga.base.fields import PgArrayField
from taiga.projects.models import Project
from .models import User, Role
from .services import get_photo_or_gravatar_url, get_big_photo_or_gravatar_url
import re
######################################################
## User
######################################################
class ContactProjectDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = ("id", "slug", "name")
class UserSerializer(serializers.ModelSerializer):
full_name_display = serializers.SerializerMethodField("get_full_name_display")
photo = serializers.SerializerMethodField("get_photo")
big_photo = serializers.SerializerMethodField("get_big_photo")
roles = serializers.SerializerMethodField("get_roles")
projects_with_me = serializers.SerializerMethodField("get_projects_with_me")
class Meta:
model = User
# IMPORTANT: Keep the UserAdminSerializer Meta below in sync
# with this info (the email field is included there)
fields = ("id", "username", "full_name", "full_name_display",
"color", "bio", "lang", "theme", "timezone", "is_active",
"photo", "big_photo", "roles", "projects_with_me")
read_only_fields = ("id",)
def validate_username(self, attrs, source):
value = attrs[source]
validator = validators.RegexValidator(re.compile('^[\w.-]+$'), _("invalid username"),
_("invalid"))
try:
validator(value)
except ValidationError:
raise serializers.ValidationError(_("Required. 255 characters or fewer. Letters, "
"numbers and /./-/_ characters'"))
if (self.object and
self.object.username != value and
User.objects.filter(username=value).exists()):
raise serializers.ValidationError(_("Invalid username. Try with a different one."))
return attrs
def get_full_name_display(self, obj):
return obj.get_full_name() if obj else ""
def get_photo(self, user):
return get_photo_or_gravatar_url(user)
def get_big_photo(self, user):
return get_big_photo_or_gravatar_url(user)
def get_roles(self, user):
return user.memberships.order_by("role__name").values_list("role__name", flat=True).distinct()
def get_projects_with_me(self, user):
request = self.context.get("request", None)
requesting_user = request and request.user or None
if not requesting_user or not requesting_user.is_authenticated():
return []
else:
project_ids = requesting_user.memberships.values_list("project__id", flat=True)
memberships = user.memberships.filter(project__id__in=project_ids)
project_ids = memberships.values_list("project__id", flat=True)
projects = Project.objects.filter(id__in=project_ids)
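# Each serialized element looks like (hypothetical values):
#   {"id": 1, "slug": "my-project", "name": "My Project"}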
return ContactProjectDetailSerializer(projects, many=True).data
class UserAdminSerializer(UserSerializer):
class Meta:
model = User
# IMPORTANT: Keep the UserSerializer Meta above in sync
# with this info (the email field is included here)
fields = ("id", "username", "full_name", "full_name_display", "email",
"color", "bio", "lang", "theme", "timezone", "is_active", "photo",
"big_photo")
read_only_fields = ("id", "email")
class BasicInfoSerializer(UserSerializer):
class Meta:
model = User
fields = ("username", "full_name_display","photo", "big_photo")
class RecoverySerializer(serializers.Serializer):
token = serializers.CharField(max_length=200)
password = serializers.CharField(min_length=6)
class ChangeEmailSerializer(serializers.Serializer):
email_token = serializers.CharField(max_length=200)
class CancelAccountSerializer(serializers.Serializer):
cancel_token = serializers.CharField(max_length=200)
######################################################
## Role
######################################################
class RoleSerializer(serializers.ModelSerializer):
members_count = serializers.SerializerMethodField("get_members_count")
permissions = PgArrayField(required=False)
class Meta:
model = Role
fields = ('id', 'name', 'permissions', 'computable', 'project', 'order', 'members_count')
i18n_fields = ("name",)
def get_members_count(self, obj):
return obj.memberships.count()
class ProjectRoleSerializer(serializers.ModelSerializer):
class Meta:
model = Role
fields = ('id', 'name', 'slug', 'order', 'computable')
i18n_fields = ("name",)
| agpl-3.0 | 3,606,832,449,048,560,600 | 37.56 | 103 | 0.639523 | false |
FireBladeNooT/Medusa_1_6 | medusa/notifiers/plex.py | 1 | 10632 | # coding=utf-8
# Author: Dustyn Gibson <[email protected]>
#
# This file is part of Medusa.
#
# Medusa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Medusa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Medusa. If not, see <http://www.gnu.org/licenses/>.
import re
from six import iteritems
from .. import app, common, logger
from ..helper.exceptions import ex
from ..helpers import getURL, make_session
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class Notifier(object):
def __init__(self):
self.headers = {
'X-Plex-Device-Name': 'Medusa',
'X-Plex-Product': 'Medusa Notifier',
'X-Plex-Client-Identifier': common.USER_AGENT,
'X-Plex-Version': '2016.02.10'
}
self.session = make_session()
@staticmethod
def _notify_pht(message, title='Medusa', host=None, username=None, password=None, force=False): # pylint: disable=too-many-arguments
"""Internal wrapper for the notify_snatch and notify_download functions
Args:
message: Message body of the notice to send
title: Title of the notice to send
host: Plex Home Theater(s) host:port
username: Plex username
password: Plex password
force: Used for the Test method to override config safety checks
Returns:
Returns a list of results in the format of host:ip:result
The result will either be 'OK' or False, this is used to be parsed by the calling function.
"""
from . import kodi_notifier
# suppress notifications if the notifier is disabled but the notify options are checked
if not app.USE_PLEX_CLIENT and not force:
return False
host = host or app.PLEX_CLIENT_HOST
username = username or app.PLEX_CLIENT_USERNAME
password = password or app.PLEX_CLIENT_PASSWORD
return kodi_notifier._notify_kodi(message, title=title, host=host, username=username, password=password, force=force, dest_app="PLEX") # pylint: disable=protected-access
##############################################################################
# Public functions
##############################################################################
def notify_snatch(self, ep_name, is_proper):
if app.PLEX_NOTIFY_ONSNATCH:
self._notify_pht(ep_name, common.notifyStrings[(common.NOTIFY_SNATCH, common.NOTIFY_SNATCH_PROPER)[is_proper]])
def notify_download(self, ep_name):
if app.PLEX_NOTIFY_ONDOWNLOAD:
self._notify_pht(ep_name, common.notifyStrings[common.NOTIFY_DOWNLOAD])
def notify_subtitle_download(self, ep_name, lang):
if app.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_pht(ep_name + ': ' + lang, common.notifyStrings[common.NOTIFY_SUBTITLE_DOWNLOAD])
def notify_git_update(self, new_version='??'):
if app.NOTIFY_ON_UPDATE:
update_text = common.notifyStrings[common.NOTIFY_GIT_UPDATE_TEXT]
title = common.notifyStrings[common.NOTIFY_GIT_UPDATE]
if update_text and title and new_version:
self._notify_pht(update_text + new_version, title)
def notify_login(self, ipaddress=""):
if app.NOTIFY_ON_LOGIN:
update_text = common.notifyStrings[common.NOTIFY_LOGIN_TEXT]
title = common.notifyStrings[common.NOTIFY_LOGIN]
if update_text and title and ipaddress:
self._notify_pht(update_text.format(ipaddress), title)
def test_notify_pht(self, host, username, password):
return self._notify_pht('This is a test notification from Medusa',
'Test Notification', host, username, password, force=True)
def test_notify_pms(self, host, username, password, plex_server_token):
return self.update_library(host=host, username=username, password=password,
plex_server_token=plex_server_token, force=True)
def update_library(self, ep_obj=None, host=None, # pylint: disable=too-many-arguments, too-many-locals, too-many-statements, too-many-branches
username=None, password=None,
plex_server_token=None, force=False):
"""Handles updating the Plex Media Server host via HTTP API
Plex Media Server currently only supports updating the whole video library and not a specific path.
Returns:
Returns None when there is no issue, else a string of the hosts with connection issues
"""
if not (app.USE_PLEX_SERVER and app.PLEX_UPDATE_LIBRARY) and not force:
return None
host = host or app.PLEX_SERVER_HOST
if not host:
logger.log(u'PLEX: No Plex Media Server host specified, check your settings', logger.DEBUG)
return False
if not self.get_token(username, password, plex_server_token):
logger.log(u'PLEX: Error getting auth token for Plex Media Server, check your settings', logger.WARNING)
return False
file_location = '' if not ep_obj else ep_obj.location
host_list = {x.strip() for x in host.split(',') if x.strip()}
hosts_all, hosts_match = {}, {}  # two separate dicts; aliasing them would break the match logic below
hosts_failed = set()
for cur_host in host_list:
url = 'http{0}://{1}/library/sections'.format(('', 's')[bool(app.PLEX_SERVER_HTTPS)], cur_host)
try:
xml_response = getURL(url, headers=self.headers, session=self.session, returns='text')
if not xml_response:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(cur_host), logger.WARNING)
hosts_failed.add(cur_host)
continue
media_container = etree.fromstring(xml_response)
except IOError as error:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
continue
except Exception as error:
if 'invalid token' in str(error):
logger.log(u'PLEX: Please set TOKEN in Plex settings: ', logger.WARNING)
else:
logger.log(u'PLEX: Error while trying to contact Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
continue
sections = media_container.findall('.//Directory')
if not sections:
logger.log(u'PLEX: Plex Media Server not running on: {0}'.format
(cur_host), logger.DEBUG)
hosts_failed.add(cur_host)
continue
for section in sections:
if 'show' == section.attrib['type']:
keyed_host = [(str(section.attrib['key']), cur_host)]
hosts_all.update(keyed_host)
if not file_location:
continue
for section_location in section.findall('.//Location'):
section_path = re.sub(r'[/\\]+', '/', section_location.attrib['path'].lower())
section_path = re.sub(r'^(.{,2})[/\\]', '', section_path)
location_path = re.sub(r'[/\\]+', '/', file_location.lower())
location_path = re.sub(r'^(.{,2})[/\\]', '', location_path)
if section_path in location_path:
hosts_match.update(keyed_host)
if force:
return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
if hosts_match:
logger.log(u'PLEX: Updating hosts where TV section paths match the downloaded show: ' + ', '.join(set(hosts_match)), logger.DEBUG)
else:
logger.log(u'PLEX: Updating all hosts with TV sections: ' + ', '.join(set(hosts_all)), logger.DEBUG)
hosts_try = (hosts_match.copy(), hosts_all.copy())[not len(hosts_match)]
for section_key, cur_host in iteritems(hosts_try):
url = 'http{0}://{1}/library/sections/{2}/refresh'.format(('', 's')[bool(app.PLEX_SERVER_HTTPS)], cur_host, section_key)
try:
getURL(url, headers=self.headers, session=self.session, returns='text')
except Exception as error:
logger.log(u'PLEX: Error updating library section for Plex Media Server: {0}'.format
(ex(error)), logger.WARNING)
hosts_failed.add(cur_host)
return (', '.join(set(hosts_failed)), None)[not len(hosts_failed)]
def get_token(self, username=None, password=None, plex_server_token=None):
username = username or app.PLEX_SERVER_USERNAME
password = password or app.PLEX_SERVER_PASSWORD
plex_server_token = plex_server_token or app.PLEX_SERVER_TOKEN
if plex_server_token:
self.headers['X-Plex-Token'] = plex_server_token
if 'X-Plex-Token' in self.headers:
return True
if not (username and password):
return True
logger.log(u'PLEX: fetching plex.tv credentials for user: ' + username, logger.DEBUG)
params = {
'user[login]': username,
'user[password]': password
}
try:
response = getURL('https://plex.tv/users/sign_in.json',
post_data=params,
headers=self.headers,
session=self.session,
returns='json')
self.headers['X-Plex-Token'] = response['user']['authentication_token']
except Exception as error:
self.headers.pop('X-Plex-Token', '')
logger.log(u'PLEX: Error fetching credentials from plex.tv for user {0}: {1}'.format
(username, error), logger.DEBUG)
return 'X-Plex-Token' in self.headers
| gpl-3.0 | 4,151,996,194,805,680,000 | 42.219512 | 178 | 0.586155 | false |
MarketGarden/CardPublisher | cardpublisher/views/abstractviews.py | 1 | 2758 | # -*- coding: cp1252 -*-
from PyQt4 import QtCore, QtGui
import os
import os.path
models = None
class AbstractFormView:
"""gère le cycle de vie de sauvegarde d'un formulaire"""
def __init__(self):
self.saved = True
self.saveAction.clicked.connect(self.doSave)
def closeEvent(self, event):
"""override Widget::closeEvent()"""
self.doClose()
event.accept()
def doUnsave(self):
self.setWindowTitle("* %s" % self.file)
self.saveAction.setEnabled(True)
self.saved = False
def doSave(self):
models.SaveData(self.file, self.model)
self.setWindowTitle("%s" % self.file)
self.saveAction.setEnabled(False)
self.saved = True
def doClose(self):
if not self.saved:
reply = QtGui.QMessageBox.question(self, 'Warning',
"Save changes?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes : self.doSave()
def doSaveFile(self, input, title, filter):
path = self.absolutePath(str(input.text())) if input.text() else os.path.dirname(str(self.file))
filename = QtGui.QFileDialog.getSaveFileName(self, title, path, filter)
if filename:
input.setText(self.relativePath(str(filename)))
def doOpenFile(self, input, title, filter):
path = self.absolutePath(str(input.text())) if input.text() else os.path.dirname(str(self.file))
filename = QtGui.QFileDialog.getOpenFileName(self, title, path, filter)
if filename:
input.setText(self.relativePath(str(filename)))
def relativePath(self, path):
return RelativePath(str(path), str(self.file))
def absolutePath(self, relativePath):
return os.path.join(os.path.dirname(str(self.file)), str(relativePath))
def RelativePath(destination, rootFile):
try:
if os.path.isabs(destination):
return os.path.relpath(destination, os.path.dirname(rootFile))
else:
return destination
except ValueError:
return destination
if __name__ == "__main__":
print RelativePath("c:\\A\\B\\C\\", "c:\\A\\B\\C\\D\\test.root"), ".."
print RelativePath("d:\\A\\B\\C\\", "c:\\A\\B\\C\\D\\test.root"), "d:\A\B\C\\"
print RelativePath("c:\\A\\B\E\\", "c:\\A\\B\\C\\D\\test.root"), "..\..\E\\"
print RelativePath("c:\\A\\B\\E\\png.file", "c:\\A\\B\\C\\D\\test.root"), "..\..\E\\png.file"
print RelativePath("..\\KO\\B\\E\\png.file", "c:\\A\\B\\C\\D\\test.root"), "..\KO\B\E\\png.file"
print RelativePath("c:\\A\\B\\C\\D\\test.png", "c:\\A\\B\\C\\D\\test.root"), "test.png"
| gpl-3.0 | -7,044,412,616,233,355,000 | 36.27027 | 104 | 0.585932 | false |
QualiSystems/shellfoundry | shellfoundry/commands/extend_command.py | 1 | 6915 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import click
from shellfoundry.exceptions import VersionRequestException
from shellfoundry.utilities.config_reader import CloudShellConfigReader, Configuration
from shellfoundry.utilities.constants import (
METADATA_AUTHOR_FIELD,
TEMPLATE_AUTHOR_FIELD,
TEMPLATE_BASED_ON,
)
from shellfoundry.utilities.modifiers.definition.definition_modification import (
DefinitionModification,
)
from shellfoundry.utilities.repository_downloader import RepositoryDownloader
from shellfoundry.utilities.temp_dir_context import TempDirContext
from shellfoundry.utilities.validations import (
ShellGenerationValidations,
ShellNameValidations,
)
class ExtendCommandExecutor(object):
LOCAL_TEMPLATE_URL_PREFIX = "local:"
SIGN_FILENAME = "signed"
ARTIFACTS = {"driver": "src", "deployment": "deployments"}
def __init__(
self,
repository_downloader=None,
shell_name_validations=None,
shell_gen_validations=None,
):
"""Creates a new shell based on an already existing shell.
:param RepositoryDownloader repository_downloader:
:param ShellNameValidations shell_name_validations:
"""
self.repository_downloader = repository_downloader or RepositoryDownloader()
self.shell_name_validations = shell_name_validations or ShellNameValidations()
self.shell_gen_validations = (
shell_gen_validations or ShellGenerationValidations()
)
self.cloudshell_config_reader = Configuration(CloudShellConfigReader())
def extend(self, source, attribute_names):
"""Create a new shell based on an already existing shell.
:param str source: The path to the existing shell. Can be a url or local path
:param tuple attribute_names: Sequence of attribute names that should be added
"""
with TempDirContext("Extended_Shell_Temp_Dir") as temp_dir:
try:
if self._is_local(source):
temp_shell_path = self._copy_local_shell(
self._remove_prefix(
source, ExtendCommandExecutor.LOCAL_TEMPLATE_URL_PREFIX
),
temp_dir,
)
else:
temp_shell_path = self._copy_online_shell(source, temp_dir)
except VersionRequestException as err:
raise click.ClickException(str(err))
except Exception:
raise click.BadParameter("Check correctness of entered attributes")
# Remove shell version from folder name
shell_path = re.sub(r"-\d+(\.\d+)*/?$", "", temp_shell_path)
os.rename(temp_shell_path, shell_path)
if not self.shell_gen_validations.validate_2nd_gen(shell_path):
raise click.ClickException("Invalid second generation Shell.")
modificator = DefinitionModification(shell_path)
self._unpack_driver_archive(shell_path, modificator)
self._remove_quali_signature(shell_path)
self._change_author(shell_path, modificator)
self._add_based_on(shell_path, modificator)
self._add_attributes(shell_path, attribute_names)
try:
shutil.move(shell_path, os.path.curdir)
except shutil.Error as err:
raise click.BadParameter(str(err))
click.echo("Created shell based on source {}".format(source))
def _copy_local_shell(self, source, destination):
"""Copy shell and extract if needed."""
if os.path.isdir(source):
source = source.rstrip(os.sep)
name = os.path.basename(source)
ext_shell_path = os.path.join(destination, name)
shutil.copytree(source, ext_shell_path)
else:
raise ValueError("Local source {0} is not a directory".format(source))
return ext_shell_path
def _copy_online_shell(self, source, destination):
"""Download shell and extract it."""
archive_path = None
try:
archive_path = self.repository_downloader.download_file(source, destination)
ext_shell_path = (
self.repository_downloader.repo_extractor.extract_to_folder(
archive_path, destination
)
)
ext_shell_path = ext_shell_path[0]
finally:
if archive_path and os.path.exists(archive_path):
os.remove(archive_path)
return os.path.join(destination, ext_shell_path)
@staticmethod
def _is_local(source):
return source.startswith(ExtendCommandExecutor.LOCAL_TEMPLATE_URL_PREFIX)
@staticmethod
def _remove_prefix(string, prefix):
return string.rpartition(prefix)[-1]
def _unpack_driver_archive(self, shell_path, modificator=None):
"""Unpack driver files from ZIP-archive."""
if not modificator:
modificator = DefinitionModification(shell_path)
artifacts = modificator.get_artifacts_files(
artifact_name_list=list(self.ARTIFACTS.keys())
)
for artifact_name, artifact_path in artifacts.items():
artifact_path = os.path.join(shell_path, artifact_path)
if os.path.exists(artifact_path):
self.repository_downloader.repo_extractor.extract_to_folder(
artifact_path,
os.path.join(shell_path, self.ARTIFACTS[artifact_name]),
)
os.remove(artifact_path)
@staticmethod
def _remove_quali_signature(shell_path):
"""Remove Quali signature from shell."""
signature_file_path = os.path.join(
shell_path, ExtendCommandExecutor.SIGN_FILENAME
)
if os.path.exists(signature_file_path):
os.remove(signature_file_path)
def _change_author(self, shell_path, modificator=None):
"""Change shell authoring."""
author = self.cloudshell_config_reader.read().author
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.edit_definition(field=TEMPLATE_AUTHOR_FIELD, value=author)
modificator.edit_tosca_meta(field=METADATA_AUTHOR_FIELD, value=author)
def _add_based_on(self, shell_path, modificator=None):
"""Add Based_ON field to shell-definition.yaml file."""
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.add_field_to_definition(field=TEMPLATE_BASED_ON)
def _add_attributes(self, shell_path, attribute_names, modificator=None):
"""Add a commented out attributes to the shell definition."""
if not modificator:
modificator = DefinitionModification(shell_path)
modificator.add_properties(attribute_names=attribute_names)
| apache-2.0 | 2,709,334,728,992,063,500 | 36.994505 | 88 | 0.635141 | false |
rougier/dana | examples/oja.py | 1 | 3086 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier ([email protected])
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
'''
Implementation of the Oja learning rule for extracting the principal component
of an elliptical gaussian distribution. Given that the distribution is
elliptical, its principal component should be oriented along the main axis of
the distribution, therefore, final weights should be +/-cos(theta), sin(theta)
References:
-----------
E. Oja, "A Simplified Neuron Model as a Principal Component Analyzer"
Journal of Mathematical Biology 15: 267-273, 1982.
'''
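# Oja's rule for a single linear unit y = w.x reads
#   dw/dt = y * (x - y * w)
# i.e. a Hebbian term y*x plus a decay -y^2*w that keeps ||w|| bounded and
# drives w toward the principal eigenvector of the input covariance matrix;
# this is exactly the connection equation 'dW/dt = post.V*(pre.V-post.V*W)'
# set up below.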
from numpy import *
from dana import *
def sample(theta, mu1, std1, mu2, std2):
''' Random sample according to an elliptical Gaussian distribution'''
u1 = random.random()
u2 = random.random()
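# Box-Muller transform: two independent uniforms (u1, u2) are mapped to
# two independent standard normal deviates (T1, T2)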
T1 = sqrt(-2.0*log(u1))*cos(2.0*pi*u2)
T2 = sqrt(-2.0*log(u1))*sin(2.0*pi*u2)
x = mu1 + (std1*T1*cos(theta) - std2*T2*sin(theta))
y = mu2 + (std1*T1*sin(theta) + std2*T2*cos(theta))
return np.array([x,y])
theta = -135.0 * pi / 180.0
src = Group((2,), 'V = sample(theta,0.0,1.0,0.0,0.5)')
tgt = Group((1,), 'V')
C = DenseConnection(src('V'), tgt('V'), np.ones((1,2)),
'dW/dt = post.V*(pre.V-post.V*W)')
run(time=10.0,dt=0.001)
print "Learned weights : ", C.weights[0]
print "(should be +/- [%f, %f])" % (cos(theta), sin(theta))
| bsd-3-clause | -3,846,490,947,212,068,000 | 44.382353 | 79 | 0.685677 | false |
ideascube/pibox-installer | kiwix-hotspot/backend/util.py | 1 | 15466 | # -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
import os
import re
import sys
import time
import shlex
import signal
import ctypes
import tempfile
import threading
import subprocess
import data
from util import CLILogger
# windows-only flags to prevent sleep on executing thread
WINDOWS_SLEEP_FLAGS = {
# Enables away mode. This value must be specified with ES_CONTINUOUS.
# Away mode should be used only by media-recording and media-distribution
# applications that must perform critical background processing
# on desktop computers while the computer appears to be sleeping.
"ES_AWAYMODE_REQUIRED": 0x00000040,
# Informs the system that the state being set should remain in effect until
# the next call that uses ES_CONTINUOUS and one of the other state flags is cleared.
"ES_CONTINUOUS": 0x80000000,
# Forces the display to be on by resetting the display idle timer.
"ES_DISPLAY_REQUIRED": 0x00000002,
# Forces the system to be in the working state by resetting the system idle timer.
"ES_SYSTEM_REQUIRED": 0x00000001,
}
class CheckCallException(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
def startup_info_args():
if hasattr(subprocess, "STARTUPINFO"):
# On Windows, subprocess calls will pop up a command window by default
# when run from Pyinstaller with the ``--noconsole`` option. Avoid this
# distraction.
si = subprocess.STARTUPINFO()
si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
cf = subprocess.CREATE_NEW_PROCESS_GROUP
else:
si = None
cf = 0
return {"startupinfo": si, "creationflags": cf}
def subprocess_pretty_call(
cmd, logger, stdin=None, check=False, decode=False, as_admin=False
):
""" flexible subprocess helper running separately and using the logger
cmd: the command to be run
logger: the logger to send debug output to
stdin: pipe input into the command
check: whether it should raise on non-zero return code
decode: whether it should decode output (bytes) into UTF-8 str
as_admin: whether the command should be run as root/admin """
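# Illustrative call (hypothetical command and logger):
#   rc, lines = subprocess_pretty_call(["ls", "-l"], logger, decode=True)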
if as_admin:
if sys.platform == "win32":
if logger is not None:
logger.std("Call (as admin): " + str(cmd))
return run_as_win_admin(cmd, logger)
from_cli = logger is None or type(logger) == CLILogger
cmd = get_admin_command(cmd, from_gui=not from_cli, logger=logger)
# We should use subprocess.run but it is not available in python3.4
process = subprocess.Popen(
cmd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**startup_info_args()
)
if logger is not None:
logger.std("Call: " + str(process.args))
process.wait()
lines = (
[l.decode("utf-8", "ignore") for l in process.stdout.readlines()]
if decode
else process.stdout.readlines()
)
if logger is not None:
for line in lines:
logger.raw_std(line if decode else line.decode("utf-8", "ignore"))
if check:
if process.returncode != 0:
raise CheckCallException("Process %s failed" % process.args)
return lines
return process.returncode, lines
def subprocess_pretty_check_call(cmd, logger, stdin=None, as_admin=False):
return subprocess_pretty_call(
cmd=cmd, logger=logger, stdin=stdin, check=True, as_admin=as_admin
)
def subprocess_timed_output(cmd, logger, timeout=10):
logger.std("Getting output of " + str(cmd))
return subprocess.check_output(
cmd, universal_newlines=True, timeout=timeout
).splitlines()
def subprocess_external(cmd, logger):
""" spawn a new process without capturing nor watching it """
logger.std("Opening: " + str(cmd))
subprocess.Popen(cmd)
def is_admin():
""" whether current process is ran as Windows Admin or unix root """
if sys.platform == "win32":
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except Exception:
return False
return os.getuid() == 0
def run_as_win_admin(command, logger):
""" run specified command with admin rights """
params = " ".join(['"{}"'.format(x) for x in command[1:]]).strip()
rc = ctypes.windll.shell32.ShellExecuteW(None, "runas", command[0], params, None, 1)
# ShellExecuteW returns 5 if user chose not to elevate
if rc == 5:
raise PermissionError()
return rc
def get_admin_command(command, from_gui, logger, log_to=None):
""" updated command to run it as root on macos or linux
from_gui: whether called via GUI. Using cli sudo if not """
if not from_gui:
return ["sudo"] + command
if sys.platform == "darwin":
# write command to a separate temp bash script
script = (
"#!/bin/bash\n\n{command} 2>&1 {redir}\n\n"
'if [ $? -eq 1 ]; then\n echo "!!! etcher returned 1" {redir}\n'
" exit 11\nfi\n\n".format(
command=" ".join([shlex.quote(cmd) for cmd in command]),
redir=">>{}".format(log_to) if log_to else "",
)
)
# add script content to logger
logger.raw_std(script)
with tempfile.NamedTemporaryFile(mode="w", suffix=".sh", delete=False) as fd:
fd.write(script)
fd.seek(0)
return [
"/usr/bin/osascript",
"-e",
'do shell script "/bin/bash {command}" '
"with administrator privileges".format(command=fd.name),
]
if sys.platform == "linux":
return ["pkexec"] + command
class EtcherWriterThread(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._should_stop = False # stop flag
self.exp = None # exception to be re-raised by caller
def stop(self):
self._should_stop = True
@classmethod
def show_log(cls, logger, log_to_file, log_file, process, eof=False):
if log_to_file:
try:
with open(log_file.name, "r") as f:
lines = f.readlines()
if len(lines) >= 2:
lines.pop()
# working
if "Validating" in lines[-1] or "Flashing" in lines[-1]:
logger.std(lines[-1].replace("\x1b[1A", "").strip())
elif "[1A" in lines[-1]: # still working but between progress
logger.std(lines[-2].replace("\x1b[1A", "").strip())
else: # probably at end of file
for line in lines[-5:]:
logger.std(line.replace("\x1b[1A", "").strip())
except Exception as exp:
logger.err("Failed to read etcher log output: {}".format(exp))
if not log_to_file or eof:
for line in process.stdout:
logger.raw_std(line.decode("utf-8", "ignore"))
def run(self):
image_fpath, device_fpath, logger = self._args
logger.step("Copy image to sd card using etcher-cli")
from_cli = logger is None or type(logger) == CLILogger
cmd, log_to_file, log_file = get_etcher_command(
image_fpath, device_fpath, logger, from_cli
)
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **startup_info_args()
)
logger.std("Starting Etcher: " + str(process.args))
# intervals in second
sleep_interval = 2
log_interval = 60
counter = 0
while process.poll() is None:
if self._should_stop: # on cancel
logger.std(". cancelling...")
break
time.sleep(sleep_interval)
# increment sleep counter until we reach log interval
if counter < log_interval // sleep_interval:
counter += 1
continue
# reset counter and display log
counter = 0
self.show_log(logger, log_to_file, log_file, process)
try:
logger.std(". has process exited?")
process.wait(timeout=2)
except subprocess.TimeoutExpired:
logger.std(". process exited")
# send ctrl^c
if sys.platform == "win32":
logger.std(". sending ctrl^C")
process.send_signal(signal.CTRL_C_EVENT)
process.send_signal(signal.CTRL_BREAK_EVENT)
time.sleep(2)
if process.poll() is None:
logger.std(". sending SIGTERM")
process.terminate() # send SIGTERM
time.sleep(2)
if process.poll() is None:
logger.std(". sending SIGKILL")
process.kill() # send SIGKILL (SIGTERM again on windows)
time.sleep(2)
else:
logger.std(". process exited")
if not process.returncode == 0:
self.exp = CheckCallException(
"Process returned {}".format(process.returncode)
)
# capture last output
self.show_log(logger, log_to_file, log_file, process, eof=True)
if log_to_file:
log_file.close()
try:
os.unlink(log_file.name)
except Exception as exp:
logger.err(str(exp))
logger.std(". process done")
logger.progress(1)
def prevent_sleep(logger):
if sys.platform == "win32":
logger.std("Setting ES_SYSTEM_REQUIRED mode to current thread")
ctypes.windll.kernel32.SetThreadExecutionState(
WINDOWS_SLEEP_FLAGS["ES_CONTINUOUS"]
| WINDOWS_SLEEP_FLAGS["ES_SYSTEM_REQUIRED"]
| WINDOWS_SLEEP_FLAGS["ES_DISPLAY_REQUIRED"]
)
return
if sys.platform == "linux":
def make_unmapped_window(wm_name):
from Xlib import display
screen = display.Display().screen()
window = screen.root.create_window(0, 0, 1, 1, 0, screen.root_depth)
window.set_wm_name(wm_name)
window.set_wm_protocols([])
return window
logger.std("Suspending xdg-screensaver")
wid = None
try:
# Create window to use with xdg-screensaver
window = make_unmapped_window("caffeinate")
wid = hex(window.id)
cmd = ["/usr/bin/xdg-screensaver", "suspend", wid]
logger.std("Calling {}".format(cmd))
p = subprocess.Popen(" ".join(cmd), shell=True)
p.wait()
if not p.returncode == 0:
raise OSError("xdg-screensaver returned {}".format(p.returncode))
except Exception as exp:
logger.err("Unable to disable sleep. Please do it manually.")
return wid
if sys.platform == "darwin":
cmd = ["/usr/bin/caffeinate", "-dsi"]
logger.std("Calling {}".format(cmd))
process = subprocess.Popen(cmd, **startup_info_args())
return process
def restore_sleep_policy(reference, logger):
if sys.platform == "win32":
logger.std("Restoring ES_CONTINUOUS mode to current thread")
ctypes.windll.kernel32.SetThreadExecutionState(
WINDOWS_SLEEP_FLAGS["ES_CONTINUOUS"]
)
return
if sys.platform == "linux":
logger.std("Resuming xdg-screensaver (wid #{})".format(reference))
if reference is not None:
subprocess_pretty_call(
["/usr/bin/xdg-screensaver", "resume", reference], logger
)
return
if sys.platform == "darwin":
logger.std("Stopping caffeinate process #{}".format(reference.pid))
reference.kill()
reference.wait(5)
return
def get_etcher_command(image_fpath, device_fpath, logger, from_cli):
# on macOS, GUI sudo captures stdout so we use a log file
log_to_file = not from_cli and sys.platform == "darwin"
if log_to_file:
log_file = tempfile.NamedTemporaryFile(
suffix=".log", delete=False, encoding="utf-8"
)
else:
log_file = None
cmd = [
os.path.join(
data.data_dir,
"etcher-cli",
"etcher" if sys.platform == "win32" else "balena-etcher",
),
"-c",
"-y",
"-u",
"-d",
device_fpath,
image_fpath,
]
# handle sudo or GUI alternative for linux and macOS
if sys.platform in ("linux", "darwin"):
cmd = get_admin_command(
cmd,
from_gui=not from_cli,
logger=logger,
log_to=log_file.name if log_to_file else None,
)
return cmd, log_to_file, log_file
def flash_image_with_etcher(image_fpath, device_fpath, retcode, from_cli=False):
""" flash an image onto SD-card
use only with small images as there is no output capture on OSX
and it is not really cancellable.
retcode is a multiprocessing.Value """
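# Illustrative usage (hypothetical paths; the Value is created by the caller):
#   retcode = multiprocessing.Value("i", -1)
#   ok = flash_image_with_etcher("hotspot.img", "/dev/disk2", retcode, from_cli=True)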
logger = CLILogger()
cmd, log_to_file, log_file = get_etcher_command(
image_fpath, device_fpath, logger, from_cli
)
returncode, _ = subprocess_pretty_call(cmd, check=False, logger=logger)
retcode.value = returncode
if log_to_file:
try:
subprocess_pretty_call(["/bin/cat", log_file.name], logger, decode=True)
log_file.close()
os.unlink(log_file.name)
except Exception as exp:
logger.err(str(exp))
return returncode == 0
def sd_has_single_partition(sd_card, logger):
""" whether sd_card consists of a single partition (expected to be clean) """
try:
if sys.platform == "darwin":
disk_prefix = re.sub(r"\/dev\/disk([0-9]+)", r"disk\1s", sd_card)
lines = subprocess_timed_output(["diskutil", "list", sd_card], logger)
nb_partitions = len(
[
line.strip().rsplit(" ", 1)[-1].replace(disk_prefix, "").strip()
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
elif sys.platform == "win32":
disk_prefix = re.sub(
r".+PHYSICALDRIVE([0-9+])", r"Disk #\1, Partition #", sd_card
)
lines = subprocess_timed_output(["wmic", "partition"], logger)
nb_partitions = len(
[
re.sub(r".+" + disk_prefix + r"([0-9]+).+", r"\1", line)
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
elif sys.platform == "linux":
disk_prefix = re.sub(r"\/dev\/([a-z0-9]+)", r"─\1", sd_card)
lines = subprocess_timed_output(["/bin/lsblk", sd_card], logger)
nb_partitions = len(
[
line.strip().split(" ", 1)[0].replace(disk_prefix, "").strip()
for line in lines
if disk_prefix in line
]
)
return nb_partitions == 1
except Exception as exp:
logger.err(str(exp))
return False
| gpl-3.0 | 3,680,295,156,000,532,500 | 32.764192 | 88 | 0.563696 | false |
Brett777/Predict-Churn | model_management/datascience_framework.py | 1 | 8515 | import os
import io
import sys
import dill
import copy
from datetime import datetime
from .evaluator import Evaluator
from .utils import (
post_to_platform,
get_current_notebook,
strip_output,
mkdir_p,
)
class DataScienceFramework(object):
def __init__(
self,
model,
problem_class,
x_test,
y_test,
name=None,
description=None,
evaluator=Evaluator,
):
# assign variables to class
self.name = name
self.description = description
self.model = model
self.problem_class = problem_class
self.y_test = list(y_test)
self.x_test = list(x_test)
self.framework = model.__module__.split(".")[0]
# get environment data
self._meta_data = self.meta_data()
self.y_pred = self.predict()
# initialize evaluator
self.evaluator = Evaluator(self.problem_class)
# class methods
@classmethod
def load(cls, model_id):
# use hard coded string to load for now
with open(".model_cache/sklearn_model_cache.pkl", "rb") as file:
instance = dill.load(file)
instance.model = instance.parse_model(io.BytesIO(instance.model_serialized))
return instance
@classmethod
def project_models(cls):
query = """
query($service_name: String!) {
runnableInstance(serviceName: $service_name) {
runnable {
project {
name
models {
edges {
node {
id
name
description
problemClass
framework
objectClass
language
languageVersion
createdAt
updatedAt
rank
hyperParameters
structure
author {
fullName
}
metrics {
edges {
node {
key
value
}
}
}
diagnostics {
edges {
node {
... on ModelDiagnosticROC {
title
falsePositiveRates
truePositiveRates
thresholds
}
... on ModelDiagnosticResidual {
title
observations
residuals
}
... on ModelDiagnosticConfusionMatrix {
title
matrix
}
}
}
}
parameters {
edges {
node {
key
value
confidenceInterval {
positive
negative
}
}
}
}
}
}
}
}
}
}
}
"""
response = post_to_platform(
{"query": query, "variables": {"service_name": os.environ["SERVICE_NAME"]}}
)
response_data = response.json()["data"]
models = list(
map(
lambda edge: edge["node"],
response_data["runnableInstance"]["runnable"]["project"]["models"][
"edges"
],
)
)
return models
# framework dependent functions
def predict(self):
""" Make prediction based on x_test """
raise NotImplementedError
def framework_version(self):
""" Return version of the framework been used. """
raise NotImplementedError
def object_class(self):
""" Return name of the model object. """
raise NotImplementedError
def parameter(self):
""" Get parameter from model. """
raise NotImplementedError
def hyperparameter(self):
""" Get hyper parameter from model. """
raise NotImplementedError
def serialize_model(self):
""" Default methods for serialize model. """
return dill.dumps(self.model)
def parse_model(self, model_file):
""" Default methods for reading in model. """
return dill.load(model_file)
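# Minimal illustrative subclass (hypothetical, assuming a scikit-learn
# estimator; the names below are not part of this module):
#   class SklearnFramework(DataScienceFramework):
#       def predict(self):
#           return self.model.predict(self.x_test)
#       def framework_version(self):
#           import sklearn
#           return sklearn.__version__
#       def object_class(self):
#           return type(self.model).__name__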
# base framework functions
def meta_data(self):
""" Capture environment meta data. """
meta_data_obj = {
"name": self.name,
"description": self.description,
"framework": self.framework,
"createdAt": datetime.now().isoformat(),
"sessionName": os.environ["SERVICE_NAME"],
"language": "python",
"languageVersion": ".".join(map(str, sys.version_info[0:3])),
}
return meta_data_obj
def diagnostics(self):
""" Return diagnostics of model. """
return [fn(self.y_test, self.y_pred) for fn in self.evaluator.diagnostics]
def metrics(self):
""" Return evaluation of model performance. """
return [fn(self.y_test, self.y_pred) for fn in self.evaluator.metrics]
def summary(self):
""" Return all infomation that will be stored. """
model_meta = {
"diagnostics": self.diagnostics(),
"metrics": self.metrics(),
"parameters": self.parameter(),
"frameworkVersion": self.framework_version(),
"hyperParameters": self.hyperparameter(),
"problemClass": self.problem_class,
"objectClass": self.object_class(),
}
model_meta.update(self._meta_data)
return model_meta
def save(self):
""" Save all information to platform. """
self.model_serialized = self.serialize_model()
# save model object locally for now
#mkdir_p(".model_cache")
#with open(".model_cache/sklearn_model_cache.pkl", "w") as file:
# dill.dump(self, file)
model_meta = self.summary()
model_meta.update(
{
"data": {"y_pred": list(self.y_pred), "y_test": list(self.y_test)},
"notebook": get_current_notebook(),
}
)
query = """
mutation($input: CreateModelInput!) {
createModel(input: $input) {
clientMutationId
}
}
"""
return post_to_platform({"query": query, "variables": {"input": model_meta}})
| mit | -343,225,543,429,373,700 | 34.92827 | 91 | 0.376864 | false |
SebWouters/CheMPS2 | PyCheMPS2/tests/test12.py | 1 | 3497 | #
# CheMPS2: a spin-adapted implementation of DMRG for ab initio quantum chemistry
# Copyright (C) 2013-2018 Sebastian Wouters
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import numpy as np
import sys
import PyCheMPS2
import ctypes
# Set the seed of the random number generator and cout.precision
Initializer = PyCheMPS2.PyInitialize()
Initializer.Init()
#######################
### BCS Hamiltonian ###
#######################
eps = np.array([ -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5 ], dtype=ctypes.c_double)
L = len( eps )
g = -1.0
power = 0.0
Nelec = L # Number of fermions in the model = Number of single-particle states
TwoS = 0 # Twice the total spin
Irrep = 0 # No point group is used, Irrep should ALWAYS be zero.
'''
Model: h_ij = delta_ij eps[i]
v_ijkl = delta_ij delta_kl g ( eps[i] * eps[k] ) ^ {power}
h_ijkl = v_ijkl + ( delta_ik h_jl + delta_jl h_ik ) / ( N - 1 )
Ham = 0.5 sum_ijkl h_ijkl sum_sigma,tau a^+_{i,sigma} a^+_{j,tau} a_{l,tau} a_{k,sigma}
'''
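# The loop further below writes these elements with 4-fold symmetry:
# the two-body part eri goes to (i,i,k,k), the symmetrized one-body part
# oei goes to (i,k,i,k), and for i == k both collapse onto one element.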
# The Hamiltonian initializes all its matrix elements to 0.0
orbirreps = np.zeros( [ L ], dtype=ctypes.c_int )
group = 0
Ham = PyCheMPS2.PyHamiltonian( L, group, orbirreps )
# Setting up the Problem
Prob = PyCheMPS2.PyProblem( Ham, TwoS, Nelec, Irrep )
# Setting up the ConvergenceScheme
# setInstruction(instruction, D, Econst, maxSweeps, noisePrefactor)
OptScheme = PyCheMPS2.PyConvergenceScheme( 2 )
OptScheme.setInstruction( 0, 100, 1e-10, 10, 0.5 )
OptScheme.setInstruction( 1, 1000, 1e-10, 10, 0.0 )
# Run ground state calculation
theDMRG = PyCheMPS2.PyDMRG( Prob, OptScheme )
###############################################################################################
### Hack: overwrite the matrix elements with 4-fold symmetry directly in the Problem object ###
###############################################################################################
for orb1 in range( L ):
for orb2 in range( L ):
eri = g * ( abs( eps[ orb1 ] * eps[ orb2 ] )**power )
oei = ( eps[ orb1 ] + eps[ orb2 ] ) / ( Nelec - 1 )
if ( orb1 == orb2 ):
Prob.setMxElement( orb1, orb1, orb2, orb2, eri + oei )
else:
Prob.setMxElement( orb1, orb1, orb2, orb2, eri )
Prob.setMxElement( orb1, orb2, orb1, orb2, oei )
theDMRG.PreSolve() # New matrix elements require reconstruction of complementary renormalized operators
Energy = theDMRG.Solve()
theDMRG.calc2DMandCorrelations()
theDMRG.printCorrelations()
# Clean-up
# theDMRG.deleteStoredMPS()
theDMRG.deleteStoredOperators()
del theDMRG
del OptScheme
del Prob
del Ham
del Initializer
# Check whether the test succeeded
if ( np.fabs( Energy + 25.5134137600604 ) < 1e-8 ):
print("================> Did test 12 succeed : yes")
else:
print("================> Did test 12 succeed : no")
| gpl-2.0 | -2,050,985,702,913,222,700 | 35.427083 | 103 | 0.635402 | false |
jinjiaho/project57 | forms.py | 1 | 3372 | from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, PasswordField, SubmitField, RadioField, validators, IntegerField, SelectField, BooleanField,DecimalField
from wtforms.validators import DataRequired, Email, Length
from flaskext.mysql import MySQL
class AddUserForm(FlaskForm):
name = StringField('Full Name', validators=[DataRequired("Please enter the name of the newcomer.")])
username= StringField('Username', validators=[DataRequired("Please enter a username.")])
role = RadioField('Role of User')
password = PasswordField('Password', validators=[DataRequired("Please enter a password."), Length(min=6, message="Passwords must be 6 characters or more.")])
submit = SubmitField('Add User')
class CreateNewItem(FlaskForm):
itemname = StringField('Item Name', validators=[DataRequired("Please enter the name of the new item.")])
category = StringField('Category of Item', validators = [DataRequired()])
price = DecimalField('Unit Price', places=4, rounding=None, validators = [DataRequired()])
reorderpt = IntegerField('Reorder Point', validators = [DataRequired()])
count_unit = SelectField('Unit for Withdrawal', validators = [DataRequired()], choices=[("carton", "carton"), ("pc", "pc"), ("kg", "kg"), ("tin", "tin"), ("box", "box"), ("unit", "unit"), ("packet", "packet")])
order_unit = SelectField('Unit for Receiving', validators = [DataRequired()], choices=[("carton", "carton"), ("pc", "pc"), ("kg", "kg"), ("tin", "tin"), ("box", "box"), ("unit", "unit")])
order_multiplier = DecimalField('Item Quantity', places=4, rounding=None, validators = [DataRequired()])
submitTwo = SubmitField('Add New Item')
class ExistingItemsLocation(FlaskForm):
itemname = StringField('Item Name', validators=[DataRequired("Please insert the name of the item")])
tid = SelectField('Tag', coerce=int) # Value is tid
qty = IntegerField('Available Amount', validators = [DataRequired()])
submitFour = SubmitField('Assign To Tag')
class TransferItem(FlaskForm):
iname = StringField('Item Name')
tagOld = SelectField('Old Tag', coerce=int) # Value is tid
tagNew = SelectField('New Tag', coerce=int) # Value is tid
qty = IntegerField('Qty to Transfer', [validators.Optional()])
submit = SubmitField()
class LoginForm(FlaskForm):
username = StringField(validators=[DataRequired("Please enter a username")])
password = PasswordField(validators=[DataRequired('Please enter a password')])
remember = BooleanField()
submit = SubmitField()
class RetrievalForm(FlaskForm):
amount = StringField('Input the Amount Taken', validators=[validators.input_required()])
submit4 = SubmitField("Enter Quantity")
class AddNewLocation(FlaskForm):
tname = StringField('Name of New Tag', validators=[DataRequired("Please enter the name of the tag without spaces.")])
location = SelectField('Select Storeroom')
newLocation = StringField('Add a New Storeroom')
remarks = StringField('Remarks (optional)')
submitThree = SubmitField("Enter")
class TrackingForm(FlaskForm):
enabled = RadioField('Track Item Quantity? ', choices=[('yes','Yes'),('no','No')])
password = PasswordField(validators=[DataRequired('Please enter a password')])
remember = BooleanField()
submit = SubmitField()
class RemoveItem(FlaskForm):
iname = StringField('Item Name')
submit = SubmitField("Delete Item")
| mit | 4,052,969,891,147,546,600 | 49.328358 | 211 | 0.733393 | false |
kristohr/pybayenv2 | pybayenv/compute_average_bf.py | 1 | 4066 | #!/usr/bin/python
import sys, string, re, os, commands, time, math
#from scipy import stats
#import scipy as sp
import numpy as np
#import matplotlib as mpl
#from matplotlib import pyplot as plt
class SNP:
def __init__(self, name, num_env, t):
self.name = name
self.num_env = [False] * num_env
self.bf_list = [[0 for i in range(t)] for j in range(num_env)]
self.rel_signal = []
self.sum_signals = 0
self.lg_info = []
self.chr = 99
self.lg = 99
def get_name(self):
return self.name
def get_num_env(self):
return self.num_env
def set_num_env(self, n):
self.num_env[n] = True
def add_to_list(self, bf, k, i):
self.bf_list[k][i] = bf
def set_signal(self, gamma):
self.rel_signal.append(gamma)
self.sum_signals += gamma #Add to the total of signals
#Return the bf signal in variable k
def get_signal(self, k):
return self.rel_signal[k]
#Return the bf signal list
def get_signals(self):
return self.rel_signal
def get_sum_signals(self):
return self.sum_signals
def print_env(self):
print self.num_env
def get_median_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
median = np.median(bfs)
return median
def get_avg_bf(self, k):
#print self.bf_list[k]
bfs = np.array(self.bf_list[k])
avg = np.average(bfs)
return avg
def add_bf(self, bf):
self.sum_bf += bf
def get_sum_bf(self):
return self.sum_bf
def get_num_runs(self):
return self.num_runs
def get_bf_list(self):
return self.bf_list
def set_lg_info(self, info):
self.lg_info.append(info)
def get_lg_info(self):
return self.lg_info
def set_chr(self, ch):
self.chr = ch
def get_chr(self):
return self.chr
def set_linkage_group(self, lg):
self.lg = lg
def get_linkage_group(self):
return self.lg
def compute_average_bf(num_var, num_tests):
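"""Aggregate per-SNP Bayes factors over t runs of N variables.

Reads results/bf_results_t<i>.bf for i in [0, t) and writes the per-SNP
median and average Bayes factors to results/median_bf.txt and
results/average_bf.txt.
"""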
N = int(num_var)
t = int(num_tests)
snp_dict = {}
for i in range (0, t):
filename = "results/bf_results_t" + str(i) + ".bf"
data = open( filename, "r")
print filename
lines = data.readlines()
for line in lines:
cols = line.split("\t")
snp_name = cols[0][0:-2]
if i > 9:
snp_name = snp_name[0:-1]
if snp_name in snp_dict:
snp = snp_dict[snp_name]
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
else:
snp = SNP(snp_name, N, t)
snp_dict[snp_name] = snp
for k in range(0, N):
snp.add_to_list(float(cols[k+1]), k, i)
data.close()
print "################LENGTH:" + str(len(snp_dict))
FILE1 = open("results/median_bf.txt", "w")
FILE2 = open("results/average_bf.txt", "w")
#bf_median = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
#bf_avg = "marker\tsal1\tsal2\ttemp1\ttemp2\tox1\tox2\n"
bf_median = ""
bf_avg = ""
for key in snp_dict:
snp = snp_dict[key]
bf_avg += snp.get_name()
bf_median += snp.get_name()
for k in range(0, N):
bf_a = snp.get_avg_bf(k)
bf_m = snp.get_median_bf(k)
bf_avg += "\t" + str(bf_a)
bf_median += "\t" + str(bf_m)
bf_avg += "\n"
bf_median += "\n"
FILE1.write(bf_median)
FILE2.write(bf_avg)
FILE1.close()
FILE2.close()
if __name__ == '__main__':
# Terminate if too few arguments
if len(sys.argv) < 3:
print 'usage: %s <number of vars> <num tests>' % sys.argv[0]
sys.exit(-1)
compute_average_bf(sys.argv[1], sys.argv[2])
| bsd-3-clause | 8,263,703,149,634,889,000 | 23.792683 | 70 | 0.512789 | false |
thenakliman/nirikshak | nirikshak/post_task/console.py | 1 | 2103 | # Copyright 2017 <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from nirikshak.common import plugins
from nirikshak.post_task import base
LOG = logging.getLogger(__name__)
@plugins.register('console')
class FormatOutputConsole(base.FormatOutput):
@staticmethod
def _get_jaanch_result(jaanch_parameter):
if 'result' in jaanch_parameter['output']:
if str(jaanch_parameter['output']['result']) == \
str(jaanch_parameter['input']['result']):
return 'pass'
return 'fail'
return jaanch_parameter['input']['result']
def format_output(self, **kwargs):
jaanch_name = list(kwargs.keys())[0]
jaanch_parameter = kwargs[jaanch_name]
input_parameter = ''
for key, value in jaanch_parameter['input']['args'].items():
input_parameter = ("%s%s:%s," % (input_parameter, key, value))
jaanch_result = self._get_jaanch_result(jaanch_parameter)
jaanch_type = jaanch_parameter['type']
jaanch_name_type_param = ("%s,%s,%s" % (jaanch_name,
jaanch_type,
input_parameter))
separator = '.' * (120 - len(jaanch_name_type_param))
formatted_output = ("%s%s%s" % (jaanch_name_type_param, separator,
jaanch_result))
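# Resulting line looks like (hypothetical values):
#   "disk_check,os,path:/var,..............................pass"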
jaanch_parameter['formatted_output'] = formatted_output
LOG.info("%s output has been formatted for console", formatted_output)
return kwargs
| apache-2.0 | 6,413,825,219,913,676,000 | 39.442308 | 78 | 0.622444 | false |
matematik7/STM | tests/test_parser.py | 1 | 6119 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# tests/test_parser.py
#
# Test input arguments parser
# ----------------------------------------------------------------
# copyright (c) 2015 - Domen Ipavec
# Distributed under The MIT License, see LICENSE
# ----------------------------------------------------------------
from unittest import TestCase
import argparse, sys
from stm.configuration import Configuration
from stm.parser import Parser
# change argument parser to print to stdout
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
self.print_help(sys.stdout)
print('%s: error: %s\n' % (self.prog, message))
exit()
class Test_parser(TestCase):
def setUp(self):
self.parser = Parser()
def tearDown(self):
self.parser = None
def getConf(self, arguments):
return self.parser.getConfiguration(arguments.split(), ArgumentParser)
def assertInvalid(self, input):
with self.assertRaises(SystemExit):
self.getConf(input)
def test_empty(self):
self.assertInvalid('')
def test_direct(self):
conf = self.getConf('--input test.png test2.png --prefix pref --postfix post --folder fol')
self.assertItemsEqual(conf.input, ['test.png', 'test2.png'])
self.assertEqual(conf.name_prefix, 'pref')
self.assertEqual(conf.name_postfix, 'post')
self.assertEqual(conf.folder, 'fol')
def test_output(self):
self.assertInvalid('--input test.png test2.png --output test.png')
self.assertInvalid('--input . --output test.png')
conf = self.getConf('--input test.png --output test.png')
self.assertEqual(conf.output, 'test.png')
def test_recursive(self):
conf = self.getConf('--input test.png --recursive')
self.assertTrue(conf.recursive)
conf = self.getConf('--input test.png')
self.assertFalse(conf.recursive)
def test_debug(self):
conf = self.getConf('--input test.png --debug')
self.assertTrue(conf.debug)
conf = self.getConf('--input test.png')
self.assertFalse(conf.debug)
def test_verbose(self):
conf = self.getConf('--input test.png --verbose')
self.assertTrue(conf.verbose)
conf = self.getConf('--input test.png')
self.assertFalse(conf.verbose)
def test_file_format(self):
conf = self.getConf('--input test.png --fileFormat jpg')
self.assertEqual(conf.fileFormat, 'jpg')
self.assertInvalid('--input test.png --fileFormat krn')
def test_size(self):
conf = self.getConf('--input test.png --size 123x456')
self.assertEqual(conf.size, [123, 456])
self.assertInvalid('--input test.png --size 0x2')
self.assertInvalid('--input test.png --size -12x2')
self.assertInvalid('--input test.png --size 123')
self.assertInvalid('--input test.png --size 12x12x12')
self.assertInvalid('--input test.png --size xxx')
def test_mode(self):
conf = self.getConf('--input test.png --scale')
self.assertEqual(conf.cropMode, 'none')
conf = self.getConf('--input test.png --padd')
self.assertEqual(conf.cropMode, 'padd')
conf = self.getConf('--input test.png --crop')
self.assertEqual(conf.cropMode, 'crop')
conf = self.getConf('--input test.png --smart')
self.assertEqual(conf.cropMode, 'smart')
conf = self.getConf('--input test.png')
self.assertEqual(conf.cropMode, 'smart')
self.assertInvalid('--input test.png --scale --padd')
self.assertInvalid('--input test.png --padd --crop')
self.assertInvalid('--input test.png --crop --featured a')
self.assertInvalid('--input test.png --featured a --smart')
self.assertInvalid('--input test.png --smart --scale')
def test_mode_featured(self):
conf = self.getConf('--input test.png --featured 100x30,-15x30')
self.assertEqual(conf.featured, ([100,30], [-15, 30]))
self.assertInvalid('--input test.png --featured xxx,xxx')
self.assertInvalid('--input test.png --featured 10x10x10,15x30')
self.assertInvalid('--input test.png --featured 10x10,10x10,10x10')
self.assertInvalid('--input test.png --featured 10x10')
self.assertInvalid('--input test.png --featured 10,10x10')
def test_padd_color(self):
conf = self.getConf('--input test.png --padd --paddColor 0,100,200,250')
self.assertEqual(conf.paddColor, [0,100,200,250])
conf = self.getConf('--input test.png --paddColor 0,100,200')
self.assertEqual(conf.paddColor, [0,100,200,255])
self.assertInvalid('--input test.png --padd --paddColor 0')
self.assertInvalid('--input test.png --padd --paddColor 0,100')
self.assertInvalid('--input test.png --padd --paddColor 0,100,100,100,100')
self.assertInvalid('--input test.png --padd --paddColor -1,100,100')
self.assertInvalid('--input test.png --padd --paddColor 256,100,100')
def test_zoominess(self):
conf = self.getConf('--input test.png --zoominess 10')
self.assertEqual(conf.zoominess, 10)
conf = self.getConf('--input test.png --zoominess 0')
self.assertEqual(conf.zoominess, 0)
self.assertInvalid('--input test.png --zoominess 101')
self.assertInvalid('--input test.png --zoominess -1')
self.assertInvalid('--input test.png --zoominess 45 --padd')
self.assertInvalid('--input test.png --zoominess 45 --crop')
self.assertInvalid('--input test.png --zoominess 45 --scale')
def test_allowPadd(self):
conf = self.getConf('--input test.png --allowPadd')
self.assertTrue(conf.allowPadd)
conf = self.getConf('--input test.png')
self.assertFalse(conf.allowPadd)
| mit | -2,415,367,671,543,004,000 | 37.484277 | 99 | 0.594868 | false |
Clinical-Genomics/housekeeper | tests/test_date.py | 1 | 2661 | """Tests for date parsing module"""
import datetime
import pytest
from housekeeper.date import get_date, match_date
def test_match_date_dash():
"""Test to match common string formated date"""
# GIVEN a datestring separated by '-'
date_str = "2015-05-10"
# WHEN checking if it is a valid datestring
res = match_date(date_str)
# THEN assert the result is True
assert res is True
def test_match_date_dot():
"""Test to match common string formated date"""
# GIVEN a datestring separated by '.'
date_str = "2015.05.10"
# WHEN checking if it is a valid datestring
res = match_date(date_str)
# THEN assert the result is True
assert res is True
def test_match_invalid_date():
"""Test to match a bad formated date string"""
# GIVEN a datestring without separators
date_str = "20150510"
# WHEN checking if it is a valid datestring
res = match_date(date_str)
# THEN assert the result is False
assert res is False
def test_match_non_date():
"""Test to match non date string"""
# GIVEN a datestring not even similar to a date
date_str = "hello"
# WHEN checking if it is a valid datestring
res = match_date(date_str)
# THEN assert the result is False
assert res is False
def test_valid_date_str():
"""Test to get a date object from a valid date string"""
# GIVEN a datestring separated by '-'
date_str = "2015-05-10"
# WHEN converting to a datetime object
date_obj = get_date(date_str)
# THEN assert a succesfull conversion
assert isinstance(date_obj, datetime.datetime)
def test_valid_date_str_no_value():
"""Test get a datetime object when date string has no value"""
# GIVEN no datestring
date_str = None
# WHEN fetching a date object
date_obj = get_date(date_str)
# THEN assert a valid date was returned
assert isinstance(date_obj, datetime.datetime)
def test_datetime_str():
"""Test get a datetime object the string is a datetime str"""
# GIVEN a datestring directly from datetime
date = datetime.datetime.now()
date_str = str(date)
# WHEN converting the string to a datetime object
new_date = get_date(date_str)
# THEN assert the new date is the same as original
assert new_date == date
def test_datetime_timestamp(timestamp):
"""Test get a datetime object the string is a datetime str"""
# GIVEN a datestring directly from datetime
date_str = str(timestamp)
# WHEN converting the string to a datetime object
new_date = get_date(date_str)
# THEN assert the new date is the same as original
assert new_date == timestamp
| mit | 2,106,149,374,175,074,600 | 28.241758 | 66 | 0.677189 | false |
ciudadanointeligente/write-it | nuntium/user_section/views.py | 1 | 24845 | import requests
from django.contrib.auth.decorators import login_required
from subdomains.utils import reverse
from django.http import HttpResponse, Http404
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView, CreateView, DetailView, View, ListView, RedirectView
from django.views.generic.edit import UpdateView, DeleteView, FormView
from mailit.forms import MailitTemplateForm
from instance.models import WriteItInstance, WriteItInstanceConfig, WriteitInstancePopitInstanceRecord
from ..models import Message,\
NewAnswerNotificationTemplate, ConfirmationTemplate, \
Answer, Moderation, \
AnswerWebHook
from .forms import WriteItInstanceBasicForm, \
NewAnswerNotificationTemplateForm, ConfirmationTemplateForm, \
WriteItInstanceAnswerNotificationForm, \
WriteItInstanceApiAutoconfirmForm, \
WriteItInstanceCreateForm, \
WriteItInstanceModerationForm, \
WriteItInstanceMaxRecipientsForm, \
WriteItInstanceRateLimiterForm, \
WriteItInstanceWebBasedForm, \
AnswerForm, RelatePopitInstanceWithWriteItInstance, \
WebhookCreateForm
from django.contrib import messages as view_messages
from django.utils.translation import ugettext as _
import json
from nuntium.popit_api_instance import PopitApiInstance
from nuntium.tasks import pull_from_popit
from nuntium.user_section.forms import WriteItPopitUpdateForm
from django.contrib.sites.models import Site
class UserAccountView(TemplateView):
template_name = 'nuntium/profiles/your-profile.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserAccountView, self).dispatch(*args, **kwargs)
class WriteItInstanceDetailBaseView(DetailView):
model = WriteItInstance
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(DetailView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
self.object = super(DetailView, self).get_object(queryset=queryset)
#OK I don't know if it is better to test by id
if not self.object.owner.__eq__(self.request.user):
raise Http404
return self.object
class WriteItInstanceContactDetailView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/profiles/contacts/contacts-per-writeitinstance.html'
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceContactDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceContactDetailView, self).get_context_data(**kwargs)
context['people'] = self.object.persons.order_by('name')
return context
class WriteItInstanceStatusView(WriteItInstanceDetailBaseView):
def render_to_response(self, context, **response_kwargs):
status = self.object.pulling_from_popit_status
return HttpResponse(
json.dumps(status),
content_type='application/json',
**response_kwargs
)
class WriteItInstanceApiDocsView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/writeitinstance_api_docs.html'
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceApiDocsView, self).get_context_data(*args, **kwargs)
current_domain = Site.objects.get_current().domain
context['api_base_url'] = 'http://' + current_domain + '/api/v1/'
return context
class WriteItInstanceTemplateUpdateView(DetailView):
model = WriteItInstance
template_name = 'nuntium/profiles/templates.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceTemplateUpdateView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
self.object = super(WriteItInstanceTemplateUpdateView, self).get_object(queryset=queryset)
#OK I don't know if it is better to test by id
        if self.object.owner != self.request.user:
raise Http404
return self.object
def get_context_data(self, **kwargs):
context = super(WriteItInstanceTemplateUpdateView, self).get_context_data(**kwargs)
context['new_answer_template_form'] = NewAnswerNotificationTemplateForm(
writeitinstance=self.object,
instance=self.object.new_answer_notification_template,
)
context['mailit_template_form'] = MailitTemplateForm(
writeitinstance=self.object,
instance=self.object.mailit_template,
)
context['confirmation_template_form'] = ConfirmationTemplateForm(
writeitinstance=self.object,
instance=self.object.confirmationtemplate,
)
return context
class WriteItInstanceUpdateView(UpdateView):
form_class = WriteItInstanceBasicForm
template_name = "nuntium/writeitinstance_update_form.html"
model = WriteItInstance
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceUpdateView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
queryset = super(WriteItInstanceUpdateView, self).get_queryset().filter(owner=self.request.user)
return queryset
def get_success_url(self):
return reverse(
'writeitinstance_basic_update',
subdomain=self.object.slug,
)
class WriteItInstanceAdvancedUpdateView(UpdateView):
model = WriteItInstanceConfig
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItInstanceAdvancedUpdateView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return super(WriteItInstanceAdvancedUpdateView, self).get_queryset().filter(writeitinstance__owner=self.request.user)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceAdvancedUpdateView, self).get_context_data(**kwargs)
context['writeitinstance'] = self.object.writeitinstance
return context
def get_slug_field(self):
return 'writeitinstance__slug'
class WriteItInstanceAnswerNotificationView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceAnswerNotificationForm
template_name = 'nuntium/writeitinstance_answernotification_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_answernotification_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceRateLimiterView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceRateLimiterForm
template_name = 'nuntium/writeitinstance_ratelimiter_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_ratelimiter_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceModerationView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceModerationForm
template_name = 'nuntium/writeitinstance_moderation_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_moderation_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceApiAutoconfirmView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceApiAutoconfirmForm
template_name = 'nuntium/writeitinstance_autoconfirm_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_api_autoconfirm_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceMaxRecipientsView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceMaxRecipientsForm
template_name = 'nuntium/writeitinstance_max_recipients_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_maxrecipients_update',
subdomain=self.object.writeitinstance.slug
)
class WriteItInstanceWebBasedView(WriteItInstanceAdvancedUpdateView):
form_class = WriteItInstanceWebBasedForm
template_name = 'nuntium/writeitinstance_web_based_form.html'
def get_success_url(self):
return reverse(
'writeitinstance_webbased_update',
subdomain=self.object.writeitinstance.slug
)
class UserSectionListView(ListView):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(UserSectionListView, self).dispatch(*args, **kwargs)
def get_queryset(self):
queryset = super(UserSectionListView, self).get_queryset().filter(owner=self.request.user)
return queryset
class WriteItInstanceCreateView(CreateView):
model = WriteItInstance
form_class = WriteItInstanceCreateForm
template_name = 'nuntium/create_new_writeitinstance.html'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(WriteItInstanceCreateView, self).dispatch(*args, **kwargs)
def get_success_url(self):
return reverse(
'welcome',
subdomain=self.object.slug
)
def get_form_kwargs(self):
kwargs = super(WriteItInstanceCreateView, self).get_form_kwargs()
kwargs['owner'] = self.request.user
if 'data' in kwargs and kwargs['data'].get('legislature'):
kwargs['data'] = kwargs['data'].copy()
kwargs['data']['popit_url'] = kwargs['data']['legislature']
return kwargs
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceCreateView, self).get_context_data(*args, **kwargs)
countries_json_url = ('http://everypolitician.github.io/'
'everypolitician-writeinpublic/countries.json')
context['countries'] = requests.get(countries_json_url).json()
return context
class YourInstancesView(UserSectionListView):
model = WriteItInstance
template_name = 'nuntium/profiles/your-instances.html'
def get_context_data(self, **kwargs):
kwargs = super(YourInstancesView, self).get_context_data(**kwargs)
kwargs['new_instance_form'] = WriteItInstanceCreateForm()
kwargs['live_sites'] = kwargs['object_list'].filter(config__testing_mode=False)
kwargs['test_sites'] = kwargs['object_list'].filter(config__testing_mode=True)
return kwargs
class LoginRequiredMixin(View):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class WriteItInstanceOwnerMixin(LoginRequiredMixin):
def get_object(self):
slug = self.request.subdomain
pk = self.kwargs.get('pk')
return get_object_or_404(self.model, writeitinstance__slug=slug, writeitinstance__owner=self.request.user, pk=pk)
def get_context_data(self, **kwargs):
context = super(WriteItInstanceOwnerMixin, self).get_context_data(**kwargs)
context['writeitinstance'] = self.object.writeitinstance
return context
# Note that there is no need for subclasses of this to also subclass WriteItInstanceOwnerMixin
# as it does its own owner checking.
class UpdateTemplateWithWriteitBase(LoginRequiredMixin, UpdateView):
def get_object(self):
return get_object_or_404(self.model, writeitinstance__slug=self.request.subdomain, writeitinstance__owner=self.request.user)
def get_form_kwargs(self):
kwargs = super(UpdateTemplateWithWriteitBase, self).get_form_kwargs()
kwargs['writeitinstance'] = self.object.writeitinstance
return kwargs
def get_success_url(self):
return reverse(
'writeitinstance_template_update',
subdomain=self.object.writeitinstance.slug,
)
class NewAnswerNotificationTemplateUpdateView(UpdateTemplateWithWriteitBase):
form_class = NewAnswerNotificationTemplateForm
model = NewAnswerNotificationTemplate
class ConfirmationTemplateUpdateView(UpdateTemplateWithWriteitBase):
form_class = ConfirmationTemplateForm
model = ConfirmationTemplate
class MessagesPerWriteItInstance(LoginRequiredMixin, ListView):
model = Message
template_name = 'nuntium/profiles/messages_per_instance.html'
def get_queryset(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
return super(MessagesPerWriteItInstance, self).get_queryset().filter(writeitinstance=self.writeitinstance)
def get_context_data(self, **kwargs):
context = super(MessagesPerWriteItInstance, self).get_context_data(**kwargs)
context['writeitinstance'] = self.writeitinstance
return context
class MessageDetail(WriteItInstanceOwnerMixin, DetailView):
model = Message
template_name = "nuntium/profiles/message_detail.html"
class AnswerEditMixin(View):
def get_message(self):
raise NotImplementedError
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
self.message = self.get_message()
if self.message.writeitinstance.owner != self.request.user:
raise Http404
return super(AnswerEditMixin, self).dispatch(*args, **kwargs)
def get_success_url(self):
return reverse(
'message_detail_private',
subdomain=self.message.writeitinstance.slug,
kwargs={'pk': self.message.pk},
)
class AnswerCreateView(AnswerEditMixin, CreateView):
model = Answer
template_name = "nuntium/profiles/create_answer.html"
form_class = AnswerForm
def get_message(self):
message = Message.objects.get(id=self.kwargs['pk'])
return message
def get_form_kwargs(self):
kwargs = super(AnswerCreateView, self).get_form_kwargs()
kwargs['message'] = self.message
return kwargs
class AnswerUpdateView(AnswerEditMixin, UpdateView):
model = Answer
template_name = "nuntium/profiles/update_answer.html"
fields = ['content']
def get_message(self):
return self.model.objects.get(id=self.kwargs['pk']).message
class AcceptMessageView(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(AcceptMessageView, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user
)
message.moderate()
view_messages.info(self.request, _('The message "%(message)s" has been accepted') % {'message': message})
return reverse(
'messages_per_writeitinstance',
subdomain=message.writeitinstance.slug,
)
class RejectMessageView(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(RejectMessageView, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user
)
message.public = False
message.moderated = True
message.save()
view_messages.info(self.request, _('The message "%(message)s" has been rejected') % {'message': message})
return reverse(
'messages_per_writeitinstance',
subdomain=message.writeitinstance.slug,
)
class ModerationView(DetailView):
model = Moderation
slug_field = 'key'
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(ModerationView, self).dispatch(*args, **kwargs)
    def get_queryset(self):
        queryset = super(ModerationView, self).get_queryset()
        queryset = queryset.filter(
            message__writeitinstance__owner=self.request.user,
            message__writeitinstance__slug=self.request.subdomain,
        )
        return queryset
class AcceptModerationView(ModerationView):
template_name = "nuntium/moderation_accepted.html"
def get(self, *args, **kwargs):
moderation = self.get_object()
moderation.message.moderate()
return super(AcceptModerationView, self).get(*args, **kwargs)
class RejectModerationView(ModerationView):
template_name = "nuntium/moderation_rejected.html"
def get(self, *args, **kwargs):
get = super(RejectModerationView, self).get(*args, **kwargs)
self.object.message.public = False
        # It is set to True so that users cannot
        # mistakenly moderate this message again
        # in the admin section
self.object.message.moderated = True
self.object.message.save()
return get
class WriteitPopitRelatingView(FormView):
form_class = RelatePopitInstanceWithWriteItInstance
template_name = 'nuntium/profiles/writeitinstance_and_popit_relations.html'
# This method also checks for instance ownership
def get_writeitinstance(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
def dispatch(self, *args, **kwargs):
self.get_writeitinstance()
return super(WriteitPopitRelatingView, self).dispatch(*args, **kwargs)
def get_form_kwargs(self):
kwargs = super(WriteitPopitRelatingView, self).get_form_kwargs()
kwargs['writeitinstance'] = self.writeitinstance
return kwargs
def get_success_url(self):
return reverse('relate-writeit-popit', subdomain=self.writeitinstance.slug)
def form_valid(self, form):
form.relate()
# It returns an AsyncResult http://celery.readthedocs.org/en/latest/reference/celery.result.html
# that we could use for future information about this process
return super(WriteitPopitRelatingView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(WriteitPopitRelatingView, self).get_context_data(**kwargs)
context['writeitinstance'] = self.writeitinstance
context['relations'] = self.writeitinstance.writeitinstancepopitinstancerecord_set.all()
return context
class ReSyncFromPopit(View):
def dispatch(self, *args, **kwargs):
if not self.request.user.is_authenticated():
raise Http404
return super(ReSyncFromPopit, self).dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
writeitinstance = get_object_or_404(WriteItInstance,
slug=self.request.subdomain,
owner=self.request.user)
popits_previously_related = PopitApiInstance.objects.filter(
writeitinstancepopitinstancerecord__writeitinstance=writeitinstance)
popit_api_instance = get_object_or_404(popits_previously_related, pk=kwargs['popit_api_pk'])
pull_from_popit.delay(writeitinstance, popit_api_instance)
return HttpResponse()
class WriteItPopitUpdateView(UpdateView):
form_class = WriteItPopitUpdateForm
model = WriteitInstancePopitInstanceRecord
def get_writeitinstance(self):
self.writeitinstance = get_object_or_404(WriteItInstance, slug=self.request.subdomain, owner=self.request.user)
def dispatch(self, *args, **kwargs):
self.get_writeitinstance()
if self.request.method != 'POST':
return self.http_method_not_allowed(*args, **kwargs)
return super(WriteItPopitUpdateView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
form.save()
return HttpResponse(
json.dumps({
'id': form.instance.id,
'periodicity': form.instance.periodicity
}),
content_type='application/json'
)
def form_invalid(self, form):
super(WriteItPopitUpdateView, self).form_invalid(form)
return HttpResponse(
json.dumps({
'errors': form.errors
}),
content_type='application/json'
)
class WriteItDeleteView(DeleteView):
model = WriteItInstance
# @method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WriteItDeleteView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
obj = super(WriteItDeleteView, self).get_object(queryset=queryset)
if not obj.owner == self.request.user:
raise Http404
return obj
def get_success_url(self):
url = reverse('your-instances')
return url
class MessageTogglePublic(RedirectView):
permanent = False
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MessageTogglePublic, self).dispatch(*args, **kwargs)
def get_redirect_url(self, *args, **kwargs):
message = get_object_or_404(Message,
pk=kwargs['pk'],
writeitinstance__slug=self.request.subdomain,
writeitinstance__owner=self.request.user,
)
message.public = not message.public
message.save()
if message.public:
view_messages.info(self.request, _("This message has been marked as public"))
else:
view_messages.info(self.request, _("This message has been marked as private"))
return reverse('messages_per_writeitinstance', subdomain=self.request.subdomain)
class ContactUsView(TemplateView):
template_name = 'nuntium/profiles/contact.html'
class WelcomeView(DetailView):
model = WriteItInstance
template_name = 'nuntium/profiles/welcome.html'
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
return super(WelcomeView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(WelcomeView, self).get_context_data(**kwargs)
# passing URLs in for easy insertion into the translation tags
# because we're using an overridden version of the url tag that
# doesn't allow the use of "as" to pass the url as a variable
# that can be quoted within a translation block. *sigh*
context['url_template_update'] = reverse('writeitinstance_template_update', subdomain=self.request.subdomain)
context['url_basic_update'] = reverse('writeitinstance_basic_update', subdomain=self.request.subdomain)
context['url_maxrecipients_update'] = reverse('writeitinstance_maxrecipients_update', subdomain=self.request.subdomain)
context['url_answernotification_update'] = reverse('writeitinstance_answernotification_update', subdomain=self.request.subdomain)
context['url_recipients'] = reverse('contacts-per-writeitinstance', subdomain=self.request.subdomain)
context['url_data_sources'] = reverse('relate-writeit-popit', subdomain=self.request.subdomain)
return context
class WriteItInstanceWebHooksView(WriteItInstanceDetailBaseView):
template_name = 'nuntium/profiles/webhooks.html'
def get_context_data(self, *args, **kwargs):
context = super(WriteItInstanceWebHooksView, self).get_context_data(*args, **kwargs)
context['form'] = WebhookCreateForm(writeitinstance=self.object)
return context
class WriteItInstanceCreateWebHooksView(CreateView):
model = AnswerWebHook
form_class = WebhookCreateForm
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
self.kwargs['slug'] = request.subdomain
self.writeitinstance = get_object_or_404(WriteItInstance,
slug=self.kwargs['slug'],
owner=self.request.user)
return super(WriteItInstanceCreateWebHooksView, self).dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super(WriteItInstanceCreateWebHooksView, self).get_form_kwargs()
kwargs['writeitinstance'] = self.writeitinstance
return kwargs
def get_success_url(self):
return reverse(
'writeitinstance_webhooks',
subdomain=self.writeitinstance.slug,
)
| gpl-3.0 | 4,677,099,118,318,445,000 | 36.417169 | 137 | 0.692413 | false |
cinepost/Copperfield_FX | copper/shout/drivers/refined.py | 1 | 2799 | #!/usr/bin/env python
#
# This program shows how to write data to mplay by writing data to the
# imdisplay program using a pipe.
#
# This program uses the -k option on imdisplay to perform progressive
# refinement when rendering an image. The image is quite simple.
#
# Notes:
# This uses the simple format (no deep rasters)
# It only writes 8-bit data
#
import os, struct, time
MAGIC = (ord('h')<<24) + (ord('M')<<16) + (ord('P')<<8) + ord('0')
DATASIZE = 1 # See .c file for meaning
NCHANNELS = 4 # See .c file for meaning
EO_IMAGE = -2 # End of image marker
RES = 256
COLORS = [
(0, 0, 0, 255),
(255, 0, 0, 255),
(0, 255, 0, 255),
(0, 0, 255, 255),
(255, 255, 0, 255),
(0, 255, 255, 255),
(255, 0, 255, 255),
(255, 255, 255, 255),
]
def quadrant(x, y):
# Determine which quadrant color to use
n = (x > y) * 4
n += (x > RES/2) * 2
n += (y > RES/2)
return n
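# Illustrative mapping, assuming RES == 256: quadrant(10, 200) returns 1
# (x <= y, x <= 128, y > 128) and quadrant(200, 10) returns 6
# (x > y, x > 128, y <= 128).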
class MPlay:
def __init__(self, xres, yres, name="Test Application"):
self.XRES = xres
self.YRES = yres
# Open a pipe to imdisplay
# -p tells imdisplay to read the data from the pipe
# -k tells imdisplay to keep reading data after the image has
# been fully written
self.fp = os.popen('imdisplay -p -k -n "%s"' % name, 'w')
# The header is documented in the C code examples
header = struct.pack('I'*8, MAGIC, xres, yres, DATASIZE,
NCHANNELS, 0, 0, 0)
self.fp.write(header)
def close(self):
# To tell imdisplay that the image has been finished, we send a special
# header.
header = struct.pack('iiii', EO_IMAGE, 0, 0, 0)
self.fp.write(header)
self.fp.close()
self.fp = None
def writeTile(self, x0, x1, y0, y1, clr):
# The tile header is documented in the c code.
header = struct.pack('IIII', x0, x1, y0, y1)
self.fp.write(header)
# The tile's bounds are inclusive, so to find the number of pixels we
# need to add one to each dimension.
size = (x1 - x0 + 1) * (y1 - y0 + 1)
pixel = struct.pack('BBBB', clr[0], clr[1], clr[2], clr[3])
# Write a bunch of pixel data
self.fp.write(pixel * size)
def render(self, step):
        for y in range(0, self.YRES, step):
            for x in range(0, self.XRES, step):
self.writeTile(x, x+step-1, y, y+step-1, COLORS[quadrant(x, y)])
def main():
mp = MPlay(RES, RES)
mp.writeTile(0, RES-1, 0, RES-1, (255, 128, 64, 255))
step = 64
while step > 0:
time.sleep(.5) # Let mplay update the latest image we wrote
mp.render(step)
        step //= 2
mp.close()
if __name__ == '__main__':
main()
| unlicense | -7,526,381,331,422,434,000 | 30.1 | 80 | 0.554484 | false |
corpnewt/CorpBot.py | Cogs/BotAdmin.py | 1 | 12950 | import asyncio, discord, re, random
from operator import itemgetter
from discord.ext import commands
from Cogs import Utils, DisplayName, Message, PickList
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(BotAdmin(bot, settings))
class BotAdmin(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.dregex = re.compile(r"(?i)(discord(\.gg|app\.com)\/)(?!attachments)([^\s]+)")
self.mention_re = re.compile(r"[0-9]{17,21}")
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
async def message(self, message):
# Check for discord invite links and remove them if found - per server settings
if not self.dregex.search(message.content): return None # No invite in the passed message - nothing to do
# Got an invite - let's see if we care
if not self.settings.getServerStat(message.guild,"RemoveInviteLinks",False): return None # We don't care
# We *do* care, let's see if the author is admin/bot-admin as they'd have power to post invites
ctx = await self.bot.get_context(message)
if Utils.is_bot_admin(ctx): return None # We are immune!
# At this point - we need to delete the message
return { 'Ignore' : True, 'Delete' : True}
@commands.command(pass_context=True)
async def removeinvitelinks(self, ctx, *, yes_no = None):
"""Enables/Disables auto-deleting discord invite links in chat (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Remove discord invite links","RemoveInviteLinks",yes_no))
@commands.command(pass_context=True)
async def setuserparts(self, ctx, member : discord.Member = None, *, parts : str = None):
"""Set another user's parts list (owner only)."""
# Only allow owner
isOwner = self.settings.isOwner(ctx.author)
if isOwner == None:
msg = 'I have not been claimed, *yet*.'
return await ctx.send(msg)
elif isOwner == False:
msg = 'You are not the *true* owner of me. Only the rightful owner can use this command.'
return await ctx.send(msg)
if member == None:
msg = 'Usage: `{}setuserparts [member] "[parts text]"`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
try:
member = discord.utils.get(ctx.guild.members, name=member)
except:
return await ctx.send("That member does not exist")
if not parts:
parts = ""
self.settings.setGlobalUserStat(member, "Parts", parts)
msg = '*{}\'s* parts have been set to:\n{}'.format(DisplayName.name(member), parts)
await ctx.send(Utils.suppressed(ctx,msg))
@setuserparts.error
    async def setuserparts_error(self, ctx, error):
# do stuff
msg = 'setuserparts Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def ignore(self, ctx, *, member = None):
"""Adds a member to the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
msg = 'Usage: `{}ignore [member]`'.format(ctx.prefix)
return await ctx.send(msg)
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
return await ctx.send('*{}* is already being ignored.'.format(DisplayName.name(member)))
# Let's ignore someone
ignoreList.append({ "Name" : member.name, "ID" : member.id })
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
await ctx.send('*{}* is now being ignored.'.format(DisplayName.name(member)))
@ignore.error
    async def ignore_error(self, ctx, error):
# do stuff
msg = 'ignore Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def listen(self, ctx, *, member = None):
"""Removes a member from the bot's "ignore" list (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
if member == None:
return await ctx.send('Usage: `{}listen [member]`'.format(ctx.prefix))
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.guild)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
return await ctx.send(Utils.suppressed(ctx,msg))
ignoreList = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
for user in ignoreList:
if str(member.id) == str(user["ID"]):
# Found our user - already ignored
ignoreList.remove(user)
self.settings.setServerStat(ctx.guild, "IgnoredUsers", ignoreList)
return await ctx.send("*{}* is no longer being ignored.".format(DisplayName.name(member)))
await ctx.send('*{}* wasn\'t being ignored...'.format(DisplayName.name(member)))
@listen.error
    async def listen_error(self, ctx, error):
# do stuff
msg = 'listen Error: {}'.format(error)
await ctx.send(msg)
@commands.command(pass_context=True)
async def ignored(self, ctx):
"""Lists the users currently being ignored."""
ignoreArray = self.settings.getServerStat(ctx.guild, "IgnoredUsers")
promoSorted = sorted(ignoreArray, key=itemgetter('Name'))
if not len(promoSorted):
return await ctx.send("I'm not currently ignoring anyone.")
ignored = ["*{}*".format(DisplayName.name(ctx.guild.get_member(int(x["ID"])))) for x in promoSorted if ctx.guild.get_member(int(x["ID"]))]
await ctx.send("Currently Ignored Users:\n{}".format("\n".join(ignored)))
async def kick_ban(self, ctx, members_and_reason = None, command_name = "kick"):
# Helper method to handle the lifting for kick and ban
if not await Utils.is_bot_admin_reply(ctx): return
if not members_and_reason:
return await ctx.send('Usage: `{}{} [space delimited member mention/id] [reason]`'.format(ctx.prefix, command_name))
# Force a mention - we don't want any ambiguity
args = members_and_reason.split()
# Get our list of targets
targets = []
missed = []
unable = []
reason = ""
for index,item in enumerate(args):
if self.mention_re.search(item): # Check if it's a mention
# Resolve the member
mem_id = int(re.sub(r'\W+', '', item))
member = ctx.guild.get_member(mem_id)
if member is None and command_name in ("ban","unban"): # Didn't get a valid member, let's allow a pre-ban/unban if we can resolve them
try: member = await self.bot.fetch_user(mem_id)
except: pass
# If we have an invalid mention, save it to report later
if member is None:
missed.append(str(mem_id))
continue
# Let's check if we have a valid member and make sure it's not:
# 1. The bot, 2. The command caller, 3. Another bot-admin/admin
if isinstance(member, discord.Member) and (member.id == self.bot.user.id or member.id == ctx.author.id or Utils.is_bot_admin(ctx,member)):
unable.append(member.mention)
continue
if not member in targets: targets.append(member) # Only add them if we don't already have them
else:
# Not a mention - must be the reason, dump the rest of the items into a string
# separated by a space
reason = " ".join(args[index:])
break
reason = reason if len(reason) else "No reason provided."
if not len(targets):
msg = "**With reason:**\n\n{}".format(reason)
if len(unable): msg = "**Unable to {}:**\n\n{}\n\n".format(command_name,"\n".join(unable)) + msg
if len(missed): msg = "**Unmatched ID{}:**\n\n{}\n\n".format("" if len(missed) == 1 else "s","\n".join(missed)) + msg
return await Message.EmbedText(title="No valid members passed!",description=msg,color=ctx.author).send(ctx)
# We should have a list of targets, and the reason - let's list them for confirmation
# then generate a 4-digit confirmation code that the original requestor needs to confirm
# in order to follow through
confirmation_code = "".join([str(random.randint(0,9)) for x in range(4)])
msg = "**To {} the following member{}:**\n\n{}\n\n**With reason:**\n\n\"{}\"\n\n**Please type:**\n\n`{}`{}{}".format(
command_name,
"" if len(targets) == 1 else "s",
"\n".join([x.name+"#"+x.discriminator for x in targets]),
reason if len(reason) else "None",
confirmation_code,
"" if not len(missed) else "\n\n**Unmatched ID{}:**\n\n{}".format("" if len(missed) == 1 else "s", "\n".join(missed)),
"" if not len(unable) else "\n\n**Unable to {}:**\n\n{}".format(command_name,"\n".join(unable))
)
confirmation_message = await Message.EmbedText(title="{} Confirmation".format(command_name.capitalize()),description=msg,color=ctx.author).send(ctx)
def check_confirmation(message):
return message.channel == ctx.channel and ctx.author == message.author # Just making sure it's the same user/channel
try: confirmation_user = await self.bot.wait_for('message', timeout=60, check=check_confirmation)
except: confirmation_user = ""
# Delete the confirmation message
await confirmation_message.delete()
# Verify the confirmation
        if confirmation_user is None or confirmation_user.content != confirmation_code: return await ctx.send("{} cancelled!".format(command_name.capitalize()))
# We got the authorization!
message = await Message.EmbedText(title="{}ing...".format("Bann" if command_name == "ban" else "Unbann" if command_name == "unban" else "Kick"),color=ctx.author).send(ctx)
canned = []
cant = []
command = {"ban":ctx.guild.ban,"kick":ctx.guild.kick,"unban":ctx.guild.unban}.get(command_name.lower(),ctx.guild.kick)
for target in targets:
try:
await command(target,reason="{}#{}: {}".format(ctx.author.name,ctx.author.discriminator,reason))
canned.append(target)
except: cant.append(target)
msg = ""
if len(canned):
msg += "**I was ABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in canned]))
if len(cant):
msg += "**I was UNABLE to {}:**\n\n{}\n\n".format(command_name,"\n".join([x.name+"#"+x.discriminator for x in cant]))
await Message.EmbedText(title="{} Results".format(command_name.capitalize()),description=msg).edit(ctx,message)
@commands.command(pass_context=True)
async def kick(self, ctx, *, members = None, reason = None):
"""Kicks the passed members for the specified reason.
All kick targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $kick @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"kick")
@commands.command(pass_context=True)
async def ban(self, ctx, *, members = None, reason = None):
"""Bans the passed members for the specified reason.
All ban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $ban @user1#1234 @user2#5678 @user3#9012 for spamming"""
await self.kick_ban(ctx,members,"ban")
@commands.command(pass_context=True)
async def unban(self, ctx, *, members = None, reason = None):
"""Unbans the passed members for the specified reason.
All unban targets must be mentions or ids to avoid ambiguity (bot-admin only).
eg: $unban @user1#1234 @user2#5678 @user3#9012 because we're nice"""
await self.kick_ban(ctx,members,"unban")
@commands.command()
async def banned(self, ctx, *, user_id = None):
"""Queries the guild's ban list for the passed user id and responds with whether they've been banned and the reason (bot-admin only)."""
if not await Utils.is_bot_admin_reply(ctx): return
try: all_bans = await ctx.guild.bans()
except: return await ctx.send("I couldn't get the ban list :(")
if not len(all_bans): return await Message.EmbedText(title="Ban List",description="No bans found",color=ctx.author).send(ctx)
orig_user = user_id
try: user_id = int(user_id) if user_id != None else None
except: user_id = -1 # Use -1 to indicate unresolved
entries = []
for ban in all_bans:
entries.append({"name":"{}#{} ({})".format(ban.user.name,ban.user.discriminator,ban.user.id),"value":ban.reason if ban.reason else "No reason provided"})
if user_id != None and user_id == ban.user.id:
# Got a match - display it
return await Message.Embed(
title="Ban Found For {}".format(user_id),
fields=[entries[-1]], # Send the last found entry
color=ctx.author
).send(ctx)
return await PickList.PagePicker(title="Ban List ({:,} total)".format(len(entries)),description=None if user_id == None else "No match found for '{}'.".format(orig_user),list=entries,ctx=ctx).pick()
| mit | 2,491,016,822,418,353,700 | 43.759717 | 200 | 0.669035 | false |
proyectosdeley/proyectos_de_ley | migrate_db.py | 1 | 2327 | import dataset
import datetime
import os
import unicodedata
def convert_name_to_slug(name):
    """Takes a congresista name and returns its slug, or None if the
    name has fewer than three tokens."""
    name = name.replace(",", "").lower()
    name = name.split(" ")
    if len(name) <= 2:
        return None
    i = 0
    slug = ""
    while i < 3:
        slug += name[i]
        if i < 2:
            slug += "_"
        i += 1
    slug = unicodedata.normalize('NFKD', slug).encode('ascii', 'ignore')
    slug = str(slug, encoding="utf-8")
    return slug + "/"
old_db = os.path.join("..", "leyes.db")
new_db = "leyes_sqlite3.db"
db = dataset.connect("sqlite:///" + old_db)
res = db.query("select * from proyectos")
new_items = []
slugs = [] # translation table between name an URL
for i in res:
timestamp = datetime.datetime.fromtimestamp(i['timestamp'])
i['time_created'] = timestamp
i['time_edited'] = timestamp
try:
fecha_presentacion = datetime.datetime.strptime(
i['fecha_presentacion'],
'%d/%m/%Y',
)
except ValueError:
fecha_presentacion = datetime.datetime.strptime(
i['fecha_presentacion'],
'%d/%m/%y',
)
    fecha_presentacion = fecha_presentacion.date()
i['fecha_presentacion'] = fecha_presentacion
i['expediente'] = i['link_to_pdf']
if i['pdf_url'] is None:
i['pdf_url'] = ''
if i['seguimiento_page'] is None:
i['seguimiento_page'] = ''
del i['link_to_pdf']
del i['timestamp']
del i['id']
del i['link']
congresistas = i['congresistas'].split(';')
for congre in congresistas:
congre = congre.strip()
obj = dict(nombre=congre)
if congre is not None and congre.strip() != '':
congre_slug = convert_name_to_slug(congre)
obj['slug'] = congre_slug
if obj not in slugs and congre_slug is not None:
slugs.append(obj)
new_items.append(i)
db = dataset.connect("sqlite:///" + new_db)
table = db['pdl_proyecto']
table.insert_many(new_items)
table = db['pdl_slug']
table.insert_many(slugs)
# fix domain from example.com to proyectosdeley.pe
table = db['django_site']
table.update(dict(id=1, domain='proyectosdeley.pe', name='proyectosdeley.pe'),
['id']
)
| mit | -3,755,114,459,877,640,000 | 25.443182 | 78 | 0.568973 | false |
ubports-weblate/gallery-app | tests/autopilot/gallery_app/emulators/photo_viewer.py | 1 | 9588 | # -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
# Copyright 2012-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
import logging
from autopilot.introspection.dbus import StateNotFoundError
import autopilot.logging
import ubuntuuitoolkit
from gallery_app.emulators import main_screen
from gallery_app.emulators.gallery_utils import(
GalleryAppException,
GalleryUtils
)
logger = logging.getLogger(__name__)
class PopupPhotoViewer(ubuntuuitoolkit.UbuntuUIToolkitCustomProxyObjectBase):
def _get_header(self):
main = self.get_root_instance().select_single(main_screen.MainScreen)
return main.select_single('PageHeader',
objectName='photoViewerHeader')
def _open_overflow(self):
overflow_button = self._get_header().select_single(
objectName='overflow_action_button')
self.pointing_device.click_object(overflow_button)
return self.get_root_instance().wait_select_single(
objectName='actions_overflow_panel',
visible=True)
def click_action_button(self, action_object_name):
header = self._get_header()
if not header.visible:
main = self.get_root_instance().select_single(
main_screen.MainScreen)
x, y, w, h = main.globalRect
self.pointing_device.move(x + (w // 2), y + (h // 2))
self.pointing_device.click()
header.visible.wait_for(True)
try:
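            # First try the action button exposed directly in the header...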
object_name = action_object_name + "_button"
button = header.select_single(objectName=object_name)
self.pointing_device.click_object(button)
except StateNotFoundError:
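            # ...otherwise fall back to the same button inside the overflow panel.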
object_name = action_object_name + "_button"
popover = self._open_overflow()
button = popover.select_single(objectName=object_name)
self.pointing_device.click_object(button)
@autopilot.logging.log_action(logger.info)
def delete_current_photo(self, confirm=True):
self.click_action_button("deleteButton")
if confirm:
self.confirm_delete_photo()
else:
self.cancel_delete_photo()
@autopilot.logging.log_action(logger.debug)
def confirm_delete_photo(self):
self._click_delete_dialog_button("Yes")
def _click_delete_dialog_button(self, name):
delete_dialog = self._get_delete_dialog()
button = delete_dialog.wait_select_single(
"Button", objectName="deletePhotoDialog" + name, visible=True)
self.pointing_device.click_object(button)
delete_dialog.wait_until_destroyed()
def _get_delete_dialog(self):
delete_dialog = self.get_root_instance().wait_select_single(
objectName="deletePhotoDialog")
delete_dialog.visible.wait_for(True)
delete_dialog.opacity.wait_for(1)
return delete_dialog
@autopilot.logging.log_action(logger.debug)
def cancel_delete_photo(self):
self._click_delete_dialog_button('No')
class PhotoViewer(GalleryUtils):
def __init__(self, app):
super(PhotoViewer, self).__init__(self)
self.app = app
def get_popup_album_picker(self):
"""Returns the photo viewer album pickers."""
return self.app.wait_select_single("PopupAlbumPicker",
objectName="popupAlbumPicker")
def get_share_peer_picker(self):
"""Returns the photo viewer share picker."""
return self.app.wait_select_single(objectName="sharePicker",
visible=True)
def get_photo_editor(self):
"""Returns the photo edit dialog."""
return self.app.wait_select_single("PhotoEditor")
def get_revert_to_original_dialog(self):
"""Returns the revert to original dialog."""
return self.app.wait_select_single("Dialog",
objectName="revertPromptDialog")
def get_cancel_revert_to_original_button(self):
"""Returns the revert to original cancel button."""
return self.get_revert_to_original_dialog().wait_select_single(
"Button",
objectName="cancelRevertButton",
visible=True)
def get_confirm_revert_to_original_button(self):
"""Returns the revert to original confirm button."""
return self.get_revert_to_original_dialog().wait_select_single(
"Button",
objectName="confirmRevertButton",
visible=True)
def get_photo_component(self):
# Was using a list index (lp:1247711). Still needs fixing, I'm not
# convinced this is a suitable way to select the correct item.
return self.app.wait_select_single(
"SingleMediaViewer",
objectName="openedMedia0"
)
def get_photos_list(self):
return self.app.wait_select_single("MediaListView")
def get_editor_actions_bar(self):
"""Returns the actions bar for the editor."""
return self.app.select_single("ActionsBar",
objectName="editorActionsBar")
def get_editor_action_button_by_text(self, button_text):
"""Returns the action button from the editor by text."""
actions_bar = self.get_editor_actions_bar()
buttons = actions_bar.select_many('AbstractButton')
for button in buttons:
if str(button.text) == button_text:
return button
raise GalleryAppException(
'Editor action button {} could not be found'.format(button_text))
def get_crop_action_button(self):
"""Returns the crop item of the edit dialog."""
return self.get_editor_action_button_by_text("Crop")
def get_rotate_action_button(self):
"""Returns the rotate item of the edit dialog."""
return self.get_editor_action_button_by_text("Rotate")
def get_undo_menu_item(self):
"""Returns the undo item of the edit dialog."""
return self.app.select_single("Standard", objectName="undoListItem")
def get_redo_menu_item(self):
"""Returns the redo item of the edit dialog."""
return self.app.select_single("Standard", objectName="redoListItem")
def get_revert_action_button(self):
"""Returns the revert to original menu item in the edit dialog."""
return self.get_editor_action_button_by_text("Revert to Original")
def get_auto_enhance_menu_item(self):
"""Returns the 'auto enhance' menu item in the edit dialog."""
return self.app.select_single("Standard", objectName='enhanceListItem')
def get_delete_popover_cancel_item(self):
"""Returns the cancel button of the delete popover."""
return self.app.wait_select_single("Button",
objectName="deletePhotoDialogNo",
visible=True)
def get_opened_photo(self):
"""Returns the first opened photo."""
return self.app.wait_select_single("SingleMediaViewer",
objectName="openedMedia0")
def get_crop_interactor(self):
"""Returns the crop interactor."""
return self.app.wait_select_single("CropInteractor",
objectName="cropInteractor")
def get_crop_overlay(self):
"""Returns the crop overlay."""
return self.app.wait_select_single("CropOverlay",
objectName="cropOverlay")
def get_top_left_crop_corner(self):
"""Returns the top left corner of the crop overlay for dragging."""
return self.app.wait_select_single("CropCorner",
objectName="topLeftCropCorner")
def get_crop_overlays_crop_icon(self):
"""Returns the crop icon of the crop overlay."""
return self.app.wait_select_single("Button",
objectName="centerCropIcon",
visible=True)
def get_edit_preview(self):
"""Returns the edit preview."""
return self.app.wait_select_single("EditPreview",
objectName="editPreview")
def _click_item(self, item):
self.pointing_device.click_object(item)
def click_rotate_button(self):
rotate_item = self.get_rotate_action_button()
self._click_item(rotate_item)
def click_crop_button(self):
crop_item = self.get_crop_action_button()
self._click_item(crop_item)
def click_undo_item(self):
undo_item = self.get_undo_menu_item()
self._click_item(undo_item)
def click_redo_item(self):
redo_item = self.get_redo_menu_item()
self._click_item(redo_item)
def click_revert_button(self):
revert_item = self.get_revert_action_button()
self._click_item(revert_item)
def click_cancel_revert_button(self):
cancel_item = self.get_cancel_revert_to_original_button()
self._click_item(cancel_item)
def click_confirm_revert_button(self):
confirm_item = self.get_confirm_revert_to_original_button()
self._click_item(confirm_item)
def click_enhance_item(self):
enhance_item = self.get_auto_enhance_menu_item()
self._click_item(enhance_item)
| gpl-3.0 | -9,074,009,003,350,327,000 | 37.66129 | 79 | 0.61577 | false |
wikkiewikkie/flask-googlecharts | docs/conf.py | 1 | 4921 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Flask-GoogleCharts documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 6 20:15:31 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Flask-GoogleCharts'
copyright = '2016, Kevin Schellenberg'
author = 'Kevin Schellenberg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flask-GoogleChartsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Flask-GoogleCharts.tex', 'Flask-GoogleCharts Documentation',
'Kevin Schellenberg', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'flask-googlecharts', 'Flask-GoogleCharts Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Flask-GoogleCharts', 'Flask-GoogleCharts Documentation',
author, 'Flask-GoogleCharts', 'One line description of project.',
'Miscellaneous'),
]
| mit | -3,032,560,909,114,322,400 | 29.949686 | 79 | 0.678927 | false |
MaxMorgenstern/EmeraldAI | EmeraldAI/Logic/KnowledgeGathering/Weather.py | 1 | 1741 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pyowm
import datetime
from EmeraldAI.Logic.Singleton import Singleton
from EmeraldAI.Config.Config import Config
class Weather(object):
__metaclass__ = Singleton
__owm = None
__language = None
__defaultCountry = None
def __init__(self):
self.__language = Config().Get("Weather", "Language")
self.__defaultCountry = Config().Get("Weather", "CountryCode2Letter")
self.__owm = pyowm.OWM(API_key=Config().Get("Weather", "OWMAPIKey"), language=self.__language)
    def GetCurrentWeather(self, location, country=None):
        if(country is None):
            country = self.__defaultCountry
        forecast = self.__owm.weather_at_place(
            "{0},{1}".format(location, country))
        return forecast.get_weather()
    def GetFutureWeather(self, location, date=None, country=None):
        if(country is None):
            country = self.__defaultCountry
        if(date is None):
            date = datetime.date.today() + datetime.timedelta(days=1)
        forecast = self.__owm.daily_forecast(
            "{0},{1}".format(location, country))
        return forecast.get_weather_at(date)
    def GetThreeHoursForecast(self, location, country=None):
        if(country is None):
            country = self.__defaultCountry
        forecast = self.__owm.three_hours_forecast(
            "{0},{1}".format(location, country))
        return forecast.get_forecast()
    def GetDailyForecast(self, location, country=None):
        if(country is None):
            country = self.__defaultCountry
        forecast = self.__owm.daily_forecast(
            "{0},{1}".format(location, country))
        return forecast.get_forecast()
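# A minimal usage sketch (assumes a valid OWM API key in the [Weather] config
# section and the pyowm 2.x API; "Berlin" is only an example location):
#   current = Weather().GetCurrentWeather("Berlin")
#   print(current.get_temperature(unit="celsius"))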
| apache-2.0 | 6,567,496,256,828,528,000 | 34.530612 | 102 | 0.619759 | false |
emonty/ansible-container | ansible_container/shipit/modules/k8s_deployment.py | 1 | 9208 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: k8s_deployment
short_description: Create, update or delete a deployment on a Kubernetes or OpenShift cluster.
description:
  - Create, update or delete a deployment on a Kubernetes or OpenShift cluster by setting the C(state) to I(present) or
    I(absent).
- Supports check mode. Use check mode to view a list of actions the module will take.
options:
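    project_name:
        description: Project or namespace to deploy into.
        required: true
        aliases: [namespace]
    state:
        description: Whether the deployment should exist.
        choices: [present, absent]
        default: present
    deployment_name:
        description: Name of the deployment to manage.
    labels:
        description: Labels applied to the deployment and its pod template.
    selector:
        description: Label selector for the deployment.
    replicas:
        description: Desired number of pod replicas.
        default: 1
    containers:
        description: Container definitions for the pod template.
    strategy:
        description: Deployment strategy.
        choices: [Recreate, Rolling]
        default: Rolling
    recreate:
        description: Delete and recreate an existing deployment.
        default: false
    replace:
        description: Replace an existing deployment with the updated template.
        default: true
    cli:
        description: Command line client used to talk to the cluster.
        choices: [kubectl, oc]
        default: oc
    debug:
        description: Enable debug logging to ansible-container.log.
        default: false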
'''
EXAMPLES = '''
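# Hypothetical playbook snippet (names, image and values are placeholders),
# derived from this module's argument spec:
- name: Deploy demo web app
  k8s_deployment:
    project_name: demo
    deployment_name: demo-web
    state: present
    replicas: 2
    containers:
      - name: web
        image: example/web:latest
        ports: [8080]
        env:
          APP_ENV: production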
'''
RETURN = '''
'''
import logging
import logging.config
from ansible.module_utils.basic import *
from ansible_container.shipit.k8s_api import K8sApi
from ansible_container.shipit.exceptions import ShipItException
logger = logging.getLogger('k8s_deployment')
LOGGING = (
{
'version': 1,
'disable_existing_loggers': True,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': 'ansible-container.log'
}
},
'loggers': {
'k8s_deployment': {
'handlers': ['file'],
'level': 'INFO',
},
'container': {
'handlers': ['file'],
'level': 'INFO',
},
'compose': {
'handlers': [],
'level': 'INFO'
},
'docker': {
'handlers': [],
'level': 'INFO'
}
},
}
)
class K8SDeploymentManager(AnsibleModule):
def __init__(self):
self.arg_spec = dict(
project_name=dict(type='str', aliases=['namespace'], required=True),
state=dict(type='str', choices=['present', 'absent'], default='present'),
labels=dict(type='dict'),
deployment_name=dict(type='str'),
recreate=dict(type='bool', default=False),
replace=dict(type='bool', default=True),
selector=dict(type='dict'),
replicas=dict(type='int', default=1),
containers=dict(type='list'),
strategy=dict(type='str', default='Rolling', choices=['Recreate', 'Rolling']),
cli=dict(type='str', choices=['kubectl', 'oc'], default='oc'),
debug=dict(type='bool', default=False)
)
super(K8SDeploymentManager, self).__init__(self.arg_spec,
supports_check_mode=True)
self.project_name = None
self.state = None
self.labels = None
self.ports = None
self.deployment_name = None
self.selector = None
self.replace = None
self.replicas = None
self.containers = None
self.strategy = None
self.recreate = None
self.cli = None
self.api = None
self.debug = None
def exec_module(self):
for key in self.arg_spec:
setattr(self, key, self.params.get(key))
if self.debug:
LOGGING['loggers']['container']['level'] = 'DEBUG'
LOGGING['loggers']['k8s_deployment']['level'] = 'DEBUG'
logging.config.dictConfig(LOGGING)
self.api = K8sApi(target=self.cli)
actions = []
changed = False
deployments = dict()
results = dict()
try:
project_switch = self.api.set_project(self.project_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
if not project_switch:
actions.append("Create project %s" % self.project_name)
if not self.check_mode:
try:
self.api.create_project(self.project_name)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
if self.state == 'present':
deployment = self.api.get_resource('dc', self.deployment_name)
if not deployment:
template = self._create_template()
changed = True
actions.append("Create deployment %s" % self.deployment_name)
if not self.check_mode:
try:
self.api.create_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
elif deployment and self.recreate:
actions.append("Delete deployment %s" % self.deployment_name)
changed = True
template = self._create_template()
if not self.check_mode:
try:
self.api.delete_resource('dc', self.deployment_name)
self.api.create_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
elif deployment and self.replace:
template = self._create_template()
try:
template['status'] = dict(latestVersion=deployment['status']['latestVersion'] + 1)
except Exception as exc:
self.fail_json(msg="Failed to increment latestVersion for %s - %s" % (self.deployment_name,
str(exc)))
changed = True
actions.append("Update deployment %s" % self.deployment_name)
if not self.check_mode:
try:
self.api.replace_from_template(template=template)
except ShipItException as exc:
self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
deployments[self.deployment_name.replace('-', '_') + '_deployment'] = self.api.get_resource('dc', self.deployment_name)
        elif self.state == 'absent':
            # the template kind is DeploymentConfig, so query and delete the 'dc' resource
            if self.api.get_resource('dc', self.deployment_name):
                changed = True
                actions.append("Delete deployment %s" % self.deployment_name)
                if not self.check_mode:
                    try:
                        self.api.delete_resource('dc', self.deployment_name)
                    except ShipItException as exc:
                        self.fail_json(msg=exc.message, stderr=exc.stderr, stdout=exc.stdout)
results['changed'] = changed
if self.check_mode:
results['actions'] = actions
if deployments:
results['ansible_facts'] = deployments
return results
def _create_template(self):
for container in self.containers:
if container.get('env'):
container['env'] = self._env_to_list(container['env'])
if container.get('ports'):
container['ports'] = self._port_to_container_ports(container['ports'])
template = dict(
apiVersion="v1",
kind="DeploymentConfig",
metadata=dict(
name=self.deployment_name,
),
spec=dict(
template=dict(
metadata=dict(),
spec=dict(
containers=self.containers
)
),
replicas=self.replicas,
strategy=dict(
type=self.strategy,
),
)
)
if self.labels:
template['metadata']['labels'] = self.labels
template['spec']['template']['metadata']['labels'] = self.labels
if self.selector:
template['spec']['selector'] = self.selector
return template
def _env_to_list(self, env_variables):
result = []
for name, value in env_variables.items():
result.append(dict(
name=name,
value=value
))
return result
@staticmethod
def _port_to_container_ports(ports):
result = []
for port in ports:
result.append(dict(containerPort=port))
return result
def main():
manager = K8SDeploymentManager()
results = manager.exec_module()
manager.exit_json(**results)
if __name__ == '__main__':
main()
| lgpl-3.0 | 7,921,536,518,088,795,000 | 33.74717 | 131 | 0.535947 | false |
smurfix/DaBroker | dabroker/client/codec.py | 1 | 10644 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This file is part of DaBroker, a distributed data access manager.
##
## DaBroker is Copyright © 2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.rst` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from weakref import ref,WeakValueDictionary
from functools import partial
from . import ClientBaseRef,ClientBaseObj
from ..base import BaseRef,BaseObj, BrokeredInfo, BrokeredInfoInfo, adapters as baseAdapters, common_BaseObj,common_BaseRef, NoData,ManyData
from ..base.service import current_service
import logging
logger = logging.getLogger("dabroker.client.serial")
class _NotGiven: pass
class CacheProxy(object):
"""Can't weakref a string, so …"""
def __init__(self,data):
self.data = data
def kstr(v):
k = getattr(v,'__dict__',None)
if k is not None:
k = k.get('_key',None)
if k is not None:
return '.'.join(str(x) for x in k.key)
else:
return str(v)
def search_key(a,**kw):
"""Build a reproducible string from search keywords"""
if a is None:
a = ()
return ','.join(kstr(v) for v in a) + '|' + ','.join('{}:{}'.format(k, kstr(v)) for k,v in sorted(kw.items()))
# This is the client's adapter storage.
adapters = baseAdapters[:]
def codec_adapter(cls):
adapters.append(cls)
return cls
# This is a list of special metaclasses, by key,
_registry = {}
def baseclass_for(*k):
"""\
Register a base class for a specific object type.
@k is the meta object's key tuple.
See test11 for an example which overrides the root object.
If your client class duplicates an attribute, it takes
precedence: the server's value of that attribute will not be
accessible.
Usage:
@baseclass_for("static","root","meta")
class MyRoot(ClientBaseObj):
def check_me(self):
return "This is a client-specific class"
	You can use `None` as the last element (and only there); it acts as a
	wildcard that matches any single value.
"""
def proc(fn):
_registry[k] = fn
return fn
return proc
class ClientBrokeredInfo(ClientBaseObj,BrokeredInfo):
"""\
This is the base class for client-side meta objects.
"""
def __init__(self,*a,**k):
super(ClientBrokeredInfo,self).__init__(*a,**k)
self.searches = WeakValueDictionary()
self._class = None
def __call__(self, _is_meta=False, *a,**kw):
"""\
Return the class to use for objects with this as metaclass
"""
cls = self._class
if cls is None:
k = self._key.key
cls = _registry.get(k,None)
if cls is None:
# Allow a single wildcard at the end
cls = _registry.get((k[:-1])+(None,),object)
if _is_meta:
class ClientInfo(ClientBrokeredInfo,cls):
pass
else:
class ClientInfo(ClientBaseObj,cls):
pass
cls = ClientInfo
for k in self.fields.keys():
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,FieldProperty(k))
for k in self.refs.keys():
if k != '_meta':
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,RefProperty(k))
for k,v in self.backrefs.items():
setattr(cls, '_dab_'+k if hasattr(cls,k) else k,BackRefProperty(k,v))
for k,v in self.calls.items():
if not hasattr(cls,k):
setattr(cls,k,RpcProperty(v))
self._class = cls
return cls(*a,**kw)
def find(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
for r in self.client.find(self, _cached=self._dab_cached, **kw):
if not isinstance(r,BaseObj):
r = r()
yield r
def get(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
res = list(self.client.find(self, _limit=2,_cached=self._dab_cached, **kw))
if len(res) == 0:
raise NoData(cls=self,**kw)
elif len(res) == 2:
raise ManyData(cls=self,**kw)
else:
res = res[0]
if not isinstance(res,BaseObj):
res = res()
return res
def count(self, **kw):
if self._dab_cached is None:
raise RuntimeError("You cannot search "+repr(self))
return self.client.count(self, _cached=self._dab_cached, **kw)
def __repr__(self):
k=getattr(self,'_key',None)
if not k or not hasattr(self,'name'):
return super(ClientBrokeredInfo,self).__repr__()
return '‹I:{}:{}›'.format(self.name, '¦'.join(str(x) for x in k))
__str__=__unicode__=__repr__
class _ClientInfo(ClientBrokeredInfo):
"""Mix-in class for meta objects"""
_name = None
def __init__(self,*a,**k):
super(_ClientInfo,self).__init__(*a,**k)
class ClientBrokeredInfoInfo(ClientBrokeredInfo,BrokeredInfoInfo):
"""\
This is the client-side singleton meta object
(the root of DaBroker's object system)
"""
pass
client_broker_info_meta = ClientBrokeredInfoInfo()
class FieldProperty(object):
"""This property accessor handles updating non-referential attributes."""
# Note that there is no `__get__` method. It is not necessary,
# the value is stored in the object's `__dict__`;
# Python will get it from there.
def __init__(self, name):
self.name = name
def __set__(self, obj, val):
ov = obj.__dict__.get(self.name,_NotGiven)
obj.__dict__[self.name] = val
if ov is _NotGiven:
return
if obj._meta is None:
assert not ov or ov == val, (self.name,ov,val)
else:
obj._meta._dab.obj_change(obj, self.name, ov,val)
class RefProperty(object):
"""This property accessor handles referred objects"""
def __init__(self, name):
self.name = name
def __get__(self, obj, type=None):
if obj is None:
return self
k = obj._refs.get(self.name,None)
if k is None:
return None
return obj._meta._dab.get(k)
def __set__(self, obj, val):
ov = obj._refs.get(self.name,_NotGiven)
if val is not None:
val = val._key
obj._refs[self.name] = val
if ov is _NotGiven:
return
obj._meta._dab.obj_change(obj, self.name, ov,val)
class BackRefProperty(object):
"""This property accessor handles retrieving one-to-many relationships"""
def __init__(self, name,refobj):
self.name = name
self.ref = ref(refobj)
def __get__(self, obj, type=None):
if obj is None:
return self
k = obj._refs.get(self.name,None)
if k is None:
k = obj._refs[self.name] = k = BackRefHandler(obj, self.name,self.ref)
return k
class BackRefHandler(object):
"""Manage a specific back reference"""
def __init__(self, obj, name,refobj):
self.obj = ref(obj)
self.name = name
self.ref = refobj
def _deref(self):
obj = self.obj()
ref = self.ref()
if obj is None or ref is None:
raise RuntimeError("weak ref: should not have been freed")
return obj,ref
def __getitem__(self,i):
obj,ref = self._deref()
res = obj._meta._dab.send("backref_idx",obj, self.name,i)
if isinstance(res,BaseRef):
res = res()
return res
def __len__(self):
obj,ref = self._deref()
return obj._meta._dab.send("backref_len",obj, self.name)
class RpcProperty(object):
"""This property accessor returns a shim which executes a RPC to the server."""
def __init__(self, proc, base=None):
self.name = proc.name
self.cached = getattr(proc,'cached',False)
self.for_class = getattr(proc,'for_class',None)
self.meta = getattr(proc,'meta',False)
self.base = base
def _do_call(self,obj, *a,**k):
with obj._dab.env:
if self.cached and not obj._obsolete:
kws = self.name+':'+search_key(a,**k)
ckey = " ".join(str(x) for x in obj._key.key)+":"+kws
res = obj._call_cache.get(kws,_NotGiven)
if res is not _NotGiven:
res = res.data
current_service.top._cache[ckey] # Lookup to increase counter
return res
res = obj._meta._dab.call(obj,self.name, a,k, _meta=self.meta)
if self.cached and not obj._obsolete:
rc = CacheProxy(res)
obj._call_cache[kws] = rc
current_service.top._cache[ckey] = rc
return res
def __get__(self, obj, type=None):
if self.for_class is None: # normal method
if obj is None:
return self
else: # static- or classmethod
obj=type
c = partial(RpcProperty._do_call, self,obj)
c.__name__ = str(self.name)
return c
def __call__(self, *a,**k):
# direct call, "classmethod"
assert self.base is not None
return self._do_call(self.base, *a,**k)
@codec_adapter
class client_BaseRef(common_BaseRef):
cls = ClientBaseRef
@staticmethod
def decode(k,c=None):
return ClientBaseRef(key=tuple(k),code=c)
@codec_adapter
class client_BaseObj(common_BaseObj):
@classmethod
def encode_ref(obj,k):
"""\
Encode a reference, without loading the actual object.
(Since we can't load the object without encoding a reference for it, that'd be somewhat difficult.)
"""
ref = obj._refs[k]
		if ref is not None:
			ref = ClientBaseRef(meta=obj._meta, key=obj._key)
return ref
@classmethod
def decode(cls, k,c=None,f=None,r=None, _is_meta=False):
"""\
Convert this object to a class
"""
k = ClientBaseRef(key=tuple(k),code=c)
if not r or '_meta' not in r:
raise RuntimeError("Object without meta data")
m = r['_meta']
if not isinstance(m,ClientBrokeredInfo):
# assume it's a reference, so resolve it
r['_meta'] = m = m()
res = m(_is_meta)
res._key = k
# Got the class, now fill it with data
if f:
for k,v in f.items():
res.__dict__[k] = v
# do not use setattr here, it tries to record a change
if r:
for k,v in r.items():
if k == '_meta':
res._meta = v
else:
res._refs[k] = v
if f and _is_meta and 'calls' in f:
c = f['calls']
for k,v in c.items():
if getattr(v,'for_class',False):
res.__dict__[k] = RpcProperty(v,res)
pass
return current_service.top._add_to_cache(res)
@codec_adapter
class client_InfoObj(client_BaseObj):
cls = ClientBrokeredInfo
clsname = "Info"
@staticmethod
def decode(k=None,c=None,f=None, **kw):
if f is None:
# We always need the data, but this is something like a ref,
# so we need to go and get the real thing.
# NOTE this assumes that the codec doesn't throw away empty lists.
return ClientBaseRef(key=k,code=c)()
res = client_BaseObj.decode(_is_meta=True, k=k,c=c,f=f,**kw)
res.client = current_service.top
return res
@codec_adapter
class client_InfoMeta(object):
cls = ClientBrokeredInfoInfo
clsname = "_ROOT"
@staticmethod
def encode(obj, include=False):
return {}
@staticmethod
def decode(**attr):
return client_broker_info_meta
| gpl-3.0 | -5,950,866,018,713,775,000 | 25.984772 | 140 | 0.659142 | false |
hms-dbmi/clodius | test/tiles/multivec_test.py | 1 | 1503 | import os.path as op
import base64
import h5py
import pytest
import clodius.tiles.multivec as hgmu
def test_multivec():
filename = op.join("test/sample_data", "sample_gwas.multires.mv5")
with h5py.File(filename, "r") as h5:
tile_size = h5["info"].attrs["tile-size"]
resolutions = list(h5["resolutions"].keys())
reso = resolutions[0]
chroms = h5[f"resolutions/{reso}/chroms/name"][:]
num_rows = h5[f"resolutions/{reso}/values"][chroms[0]].shape[1]
total_length = sum(h5["chroms/length"])
# info
info = hgmu.tileset_info(filename)
assert info["shape"] == [tile_size, num_rows]
assert info["tile_size"] == tile_size
assert info["max_pos"] == total_length
assert set(info["resolutions"]) == set(int(reso) for reso in resolutions)
# get_single_tile
test_tile = hgmu.get_single_tile(filename, [0, 0])
assert list(test_tile.shape)[::-1] == info["shape"]
with pytest.raises(IndexError):
hgmu.get_single_tile(filename, [len(resolutions), 0])
# tiles
tids = [f"test_uuid.{level}.0.1231.123" for level in range(len(resolutions))]
tiles = hgmu.tiles(filename, tids)
for tile_id, tile_value in tiles:
tile_pos = [int(i) for i in tile_id.split(".")[1:3]]
single_tile = hgmu.get_single_tile(filename, tile_pos).astype(
tile_value["dtype"]
)
assert (
base64.b64encode(single_tile.ravel()).decode("utf-8") == tile_value["dense"]
)
| mit | 6,303,210,453,889,759,000 | 35.658537 | 88 | 0.618762 | false |
tongfa/vent | wserve/wserve/views.py | 1 | 1406 | from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from wserve.settings import VENT_WD, VENT_WWW_CLIENT_EP
import cPickle as pickle
import json, time, os
def address2key(address):
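    # Pack an (ip, port) pair into a single integer: each IPv4 octet is
    # shifted into the high bits, then the 16-bit port fills the low bits.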
r = 0
for s in address[0].split('.'):
r = r << 8
r += int(s)
r = r << 16
r += address[1]
return r
def index(request):
t = get_template('index.html')
return HttpResponse(t.render(Context()))
def audio(request):
t = get_template('audio.html')
return HttpResponse(t.render(Context()))
def longcall(request):
time.sleep(1)
def url(c):
ep = VENT_WWW_CLIENT_EP
return 'http://%s%s/camera/%d/' % (
ep[0],
'' if ep[1] == 80 else ':%d' % ep[1],
address2key(c))
    cameraList = os.listdir(VENT_WD)
cameraList.sort()
cameraListIp = [pickle.load(open("%s/%s" % (VENT_WD, name), 'r'))
for name in cameraList]
# unique value, url, name
connList = [(address2key(c),url(c),c[0]) for c in cameraListIp]
response_data = {}
response_data['result'] = 'OK'
response_data['message'] = {'cameras': connList}
print response_data
return HttpResponse(json.dumps(response_data), content_type="application/json")
| mit | 2,273,925,697,469,779,000 | 28.914894 | 83 | 0.604552 | false |
sunyihuan326/DeltaLab | Andrew_NG_learning/class_two/week_two/testCases.py | 1 | 4051 | import numpy as np
def update_parameters_with_gd_test_case():
np.random.seed(1)
learning_rate = 0.01
W1 = np.random.randn(2, 3)
b1 = np.random.randn(2, 1)
W2 = np.random.randn(3, 3)
b2 = np.random.randn(3, 1)
dW1 = np.random.randn(2, 3)
db1 = np.random.randn(2, 1)
dW2 = np.random.randn(3, 3)
db2 = np.random.randn(3, 1)
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
return parameters, grads, learning_rate
"""
def update_parameters_with_sgd_checker(function, inputs, outputs):
if function(inputs) == outputs:
print("Correct")
else:
print("Incorrect")
"""
def random_mini_batches_test_case():
np.random.seed(1)
mini_batch_size = 64
X = np.random.randn(12288, 148)
Y = np.random.randn(1, 148) < 0.5
return X, Y, mini_batch_size
def initialize_velocity_test_case():
np.random.seed(1)
W1 = np.random.randn(2, 3)
b1 = np.random.randn(2, 1)
W2 = np.random.randn(3, 3)
b2 = np.random.randn(3, 1)
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
return parameters
def update_parameters_with_momentum_test_case():
np.random.seed(1)
W1 = np.random.randn(2, 3)
b1 = np.random.randn(2, 1)
W2 = np.random.randn(3, 3)
b2 = np.random.randn(3, 1)
dW1 = np.random.randn(2, 3)
db1 = np.random.randn(2, 1)
dW2 = np.random.randn(3, 3)
db2 = np.random.randn(3, 1)
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
v = {'dW1': np.array([[0., 0., 0.],
[0., 0., 0.]]), 'dW2': np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]]), 'db1': np.array([[0.],
[0.]]),
'db2': np.array([[0.],
[0.],
[0.]])}
return parameters, grads, v
def initialize_adam_test_case():
np.random.seed(1)
W1 = np.random.randn(2, 3)
b1 = np.random.randn(2, 1)
W2 = np.random.randn(3, 3)
b2 = np.random.randn(3, 1)
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
return parameters
def update_parameters_with_adam_test_case():
np.random.seed(1)
v, s = ({'dW1': np.array([[0., 0., 0.],
[0., 0., 0.]]), 'dW2': np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]]), 'db1': np.array([[0.],
[0.]]),
'db2': np.array([[0.],
[0.],
[0.]])}, {'dW1': np.array([[0., 0., 0.],
[0., 0., 0.]]), 'dW2': np.array([[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]]),
'db1': np.array([[0.],
[0.]]), 'db2': np.array([[0.],
[0.],
[0.]])})
W1 = np.random.randn(2, 3)
b1 = np.random.randn(2, 1)
W2 = np.random.randn(3, 3)
b2 = np.random.randn(3, 1)
dW1 = np.random.randn(2, 3)
db1 = np.random.randn(2, 1)
dW2 = np.random.randn(3, 3)
db2 = np.random.randn(3, 1)
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
return parameters, grads, v, s
| mit | -7,129,573,524,181,546,000 | 34.535088 | 105 | 0.382128 | false |
herqles-io/hq-manager | src/hqmanager/api/user.py | 1 | 5040 | import cherrypy
class UserAPIController(object):
exposed = True
def __init__(self, identity, assignment):
self.identity = identity
self.assignment = assignment
def index(self):
return "User api Index"
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
@cherrypy.tools.auth(permission="herqles.user.add")
def add(self):
data = cherrypy.request.json
if 'username' not in data:
raise cherrypy.HTTPError(400, "Missing username")
if 'password' not in data:
raise cherrypy.HTTPError(400, "Missing Password")
output = {'username': data['username'], 'identity': False, 'assignment': False}
if not self.identity.user_exists(data['username']):
self.identity.create_user(data['username'], data['password'])
output['identity'] = True
if not self.assignment.has_assignment(data['username']):
self.assignment.create_assignment(data['username'])
output['assignment'] = True
return output
@cherrypy.tools.json_out()
@cherrypy.tools.auth() # If the username is the requests username allow them to see
def get(self, username):
headers = cherrypy.request.headers
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if username != cherrypy.request.user['name']:
if not self.assignment.has_permission_token(headers['X-Auth-Token'], 'herqles.user.get'):
raise cherrypy.HTTPError(403, "Invalid permissions")
permissions = self.assignment.get_permissions(username)
return {'username': username, 'permissions': permissions}
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.delete")
def delete(self, username):
output = {'username': username, 'identity': False, 'assignment': False}
        if self.identity.user_exists(username):
            self.identity.delete_user(username)
            output['identity'] = True
        if self.assignment.has_assignment(username):
            self.assignment.delete_assignment(username)
            output['assignment'] = True
return output
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def get_token(self):
data = cherrypy.request.json
if 'username' not in data or 'password' not in data:
raise cherrypy.HTTPError(400, "Username and password required")
if not self.identity.auth(data['username'], data['password']):
raise cherrypy.HTTPError(401, "Invalid username or password")
if not self.assignment.has_assignment(data['username']):
raise cherrypy.HTTPError(404, "User does not exist")
(token, expire_at) = self.assignment.get_token(data['username'])
return {"token": token, 'expire_at': long(expire_at)}
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
@cherrypy.tools.auth() # We only need to check permissions sometimes
def change_password(self):
headers = cherrypy.request.headers
data = cherrypy.request.json
if 'username' not in data:
raise cherrypy.HTTPError(400, "Missing username")
if 'password' not in data:
raise cherrypy.HTTPError(400, "Missing password")
if data['username'] != cherrypy.request.user['name']:
if not self.assignment.has_permission_token(headers['X-Auth-Token'], 'herqles.user.password'):
raise cherrypy.HTTPError(403, "Invalid permissions")
self.identity.change_password(data['username'], data['password'])
self.assignment.get_token(data['username'], force=True)
return {'username': data['username']}
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.permission.add")
def add_permission(self):
data = cherrypy.request.json
username = data['username']
permission = data['permission']
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
if self.assignment.has_permission_user(username, permission):
raise cherrypy.HTTPError(409, "User already has permission "+permission)
self.assignment.add_permission(username, permission)
return data
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
@cherrypy.tools.auth(permission="herqles.user.permission.delete")
def remove_permission(self):
data = cherrypy.request.json
username = data['username']
permission = data['permission']
if not self.assignment.has_assignment(username):
raise cherrypy.HTTPError(404, "User does not exist")
        if not self.assignment.has_permission_user(username, permission, exact=True):
            raise cherrypy.HTTPError(409, "User does not have permission "+permission)
        # Revoke the permission (assumes the assignment backend exposes
        # remove_permission as the counterpart of add_permission above).
        self.assignment.remove_permission(username, permission)
        return data
| mit | 5,361,003,477,038,764,000 | 34.244755 | 106 | 0.644444 | false |
davidgardenier/frbpoppy | tests/dm_snr/future.py | 1 | 6523 | """Check the log N log F slope for future surveys."""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import CosmicPopulation, Survey, SurveyPopulation, hist
from frbpoppy import unpickle, pprint
import frbpoppy.direction_dists as did
import frbpoppy.galacticops as go
from tests.convenience import plot_aa_style, rel_path
from tests.rates.alpha_real import EXPECTED
MAKE = True
SURVEYS = ('parkes-htru',
'wsrt-apertif',
'fast-crafts',
'puma-full',
'chord',
'ska1-low',
'ska1-mid')
SIZE = 5e4
if MAKE:
# Calculate the fraction of the sky that the survey covers
surv_f_area = {}
for name in SURVEYS:
pop = CosmicPopulation.simple(5e5)
pop.gen_direction()
survey = Survey(name)
mask = survey.in_region(pop.frbs.ra, pop.frbs.dec,
pop.frbs.gl, pop.frbs.gb)
in_surv_region = np.sum(mask)
tot_region = len(mask)
area_sky = 4*np.pi*(180/np.pi)**2 # In sq. degrees
f_area = (survey.beam_size/area_sky)*(tot_region/in_surv_region)
surv_f_area[name] = f_area
print(f'{name} covers {f_area*100}% of the sky')
surv_pops = []
for name in SURVEYS:
# Set up survey
survey = Survey(name)
if name in ('parkes-htru', 'wsrt-apertif'):
survey.set_beam(model=name)
# Set up CosmicPopulation
pop = CosmicPopulation.optimal(SIZE, generate=False)
# Only generate FRBs in the survey region
pop.set_direction(model='uniform',
min_ra=survey.ra_min,
max_ra=survey.ra_max,
min_dec=survey.dec_min,
max_dec=survey.dec_max)
# Parkes also has galactic limits:
if name == 'parkes-htru':
pop.gen_index()
pop.gen_dist()
pop.gen_time()
# Generate FRBs just within the galactic constraints
pop.gen_direction()
# Gather ra, dec coordinate limits
lims = {'min_ra': survey.ra_min, 'max_ra': survey.ra_max,
'min_dec': survey.dec_min, 'max_dec': survey.dec_max}
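            # Rejection-sample sky positions: draw uniformly inside the
            # ra/dec box, keep the draws that land in the survey region and
            # redraw the rejected ones until every source is valid.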
def sample(n_gen):
ra, dec = did.uniform(n_srcs=n_gen, **lims)
gl, gb = go.radec_to_lb(ra, dec, frac=True)
coords = [ra, dec, gl, gb]
return coords
def accept(coords):
return survey.in_region(*coords)
coords = sample(int(SIZE))
mask = accept(coords)
reject, = np.where(~mask)
while reject.size > 0:
fill = sample(reject.size)
mask = accept(fill)
for i in range(len(coords)):
coords[i][reject[mask]] = fill[i][mask]
reject = reject[~mask]
# Assign the values
frbs = pop.frbs
frbs.ra, frbs.dec = coords[0], coords[1]
frbs.gl, frbs.gb = coords[2], coords[3]
# Continue with generation
pop.gen_gal_coords()
pop.gen_dm()
pop.gen_w()
pop.gen_lum()
pop.gen_si()
else:
pop.generate()
surv_pop = SurveyPopulation(pop, survey, scale_by_area=False)
surv_pop.source_rate.f_area = surv_f_area[name]
surv_pop.source_rate.scale_by_area()
# surv_pop.save()
surv_pops.append(surv_pop)
else:
surv_pops = []
for name in SURVEYS:
surv_pops.append(unpickle(f'optimal_{name}'))
# Start plot
plot_aa_style(cols=2)
plt.rcParams["figure.figsize"] = (3.556*3, 3.556)
fig, axes = plt.subplots(1, 3)
for ax in axes.flatten():
ax.set_aspect('auto')
# Get norm pop
y = 0
ys = []
names = []
rates = []
norm_sim_rate = surv_pops[0].source_rate.det
norm_real_rate = EXPECTED['parkes-htru'][0] / EXPECTED['parkes-htru'][1]
norm_rate = norm_sim_rate / norm_real_rate
for i, surv_pop in enumerate(surv_pops):
name = surv_pop.name.split('_')[-1]
pprint(name)
if surv_pop.n_sources() == 0:
print(surv_pop.source_rate)
print(f'{name} | no FRBs in population')
continue
names.append(name)
ys.append(y)
    # Dispersion measure (DM) plot
ax = axes[0]
ax.set_xlabel(r'DM ($\textrm{pc}\ \textrm{cm}^{-3}$)')
ax.set_ylabel(r'\#')
ax.set_yscale('log')
bins, values = hist(surv_pop.frbs.dm, bin_type='lin', norm='frac',
n_bins=20)
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
    # S/N plot
ax = axes[1]
ax.set_xlabel('S/N')
ax.set_xscale('log')
ax.set_ylabel(r'\#(${>}\text{S/N}$)')
ax.set_yscale('log')
    # Update S/N plot
bins, values = hist(surv_pop.frbs.snr, bin_type='log', norm='frac',
n_bins=25)
# Cumulative sum
values = np.cumsum(values[::-1])[::-1]
values = values.astype(np.float64)
values *= float(surv_pop.source_rate.f_area)*1e6
ax.step(bins, values, where='mid', label=name)
# Plot rates
ax = axes[2]
ax.set_xscale('log')
ax.set_xlabel(r'Rate (day$^{-1}$)')
rate = surv_pop.source_rate.det/norm_rate
print(f'rate: {rate}')
line = ax.errorbar(rate, y,
fmt='x',
label=rf'{name}')
ax.grid()
rates.append(rate)
y += 1
ax.yaxis.tick_right()
ax.set_yticks(ys)
ax.set_yticklabels(names)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
for i, y in enumerate(ax.get_yticklabels()):
y.set_color(colors[i])
ax.invert_yaxis() # labels read top-to-bottom
# Add thin grey horizontal lines
x_lim = ax.get_xlim()
ax.set_xlim(x_lim)
for i, y in enumerate(ys):
ax.plot((x_lim[0], rates[i]), (y, y), color='k', lw=0.5, zorder=0, ls='--')
for e in list(zip(SURVEYS, rates)):
pprint(e)
euclidean_lines = True
if euclidean_lines:
xlims = axes[1].get_xlim()
ylims = axes[1].get_ylim()
axes[1].set_xlim(xlims)
axes[1].set_ylim(ylims)
xs = np.logspace(np.log10(xlims[0]),
np.log10(xlims[1]),
100)
for n in range(-10, 15):
ys = 10**((np.log10(xs)+n)*-1.5)
axes[1].plot(xs, ys, 'k:', linewidth=0.25)
# plt.legend()
plt.tight_layout()
plt.savefig(rel_path('./plots/future_surveys.pdf'))
| mit | 1,674,763,514,524,134,700 | 28.251121 | 86 | 0.555879 | false |
i02sopop/Kirinki | gstreamer/examples/video_receiver.py | 1 | 2317 | #!/usr/bin/env python
# -=- encoding: utf-8 -=-
################ VIDEO RECEIVER
import gobject, pygst
pygst.require("0.10")
import gst
# TODO: detect from the RTPSource element inside the GstRtpBin
REMOTE_HOST = '192.168.34.150'
READ_VIDEO_CAPS = 'video.caps'
pipeline = gst.Pipeline('server')
caps = open(READ_VIDEO_CAPS).read().replace('\\', '')
rtpbin = gst.element_factory_make('gstrtpbin', 'rtpbin')
rtpbin.set_property('latency', 400)
udpsrc_rtpin = gst.element_factory_make('udpsrc', 'udpsrc0')
udpsrc_rtpin.set_property('port', 10000)
udpsrc_caps = gst.caps_from_string(caps)
udpsrc_rtpin.set_property('caps', udpsrc_caps)
udpsrc_rtcpin = gst.element_factory_make('udpsrc', 'udpsrc1')
udpsrc_rtcpin.set_property('port', 10001)
udpsink_rtcpout = gst.element_factory_make('udpsink', 'udpsink0')
udpsink_rtcpout.set_property('host', REMOTE_HOST)
udpsink_rtcpout.set_property('port', 10002)
rtph264depay = gst.element_factory_make('rtph264depay', 'rtpdepay')
q1 = gst.element_factory_make("queue", "q1")
q2 = gst.element_factory_make("queue", "q2")
avimux = gst.element_factory_make('avimux', 'avimux')
filesink = gst.element_factory_make('filesink', 'filesink')
filesink.set_property('location', '/tmp/go.avi')
ffmpegcs = gst.element_factory_make("ffmpegcolorspace", "ffmpegcs")
ffdec264 = gst.element_factory_make('ffdec_h264', 'ffdec264')
autovideosink = gst.element_factory_make('autovideosink')
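# NOTE: q2, ffmpegcs and filesink sketch an optional recording branch
# (depay -> mux -> /tmp/go.avi); avimux is added to the pipeline below but
# never linked, so only the playback path (depay -> q1 -> decode -> sink) runs.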
pipeline.add(rtpbin, udpsrc_rtpin, udpsrc_rtcpin, udpsink_rtcpout,
rtph264depay, q1, avimux, ffdec264, autovideosink)
# Receive the RTP and RTCP streams
udpsrc_rtpin.link_pads('src', rtpbin, 'recv_rtp_sink_0')
udpsrc_rtcpin.link_pads('src', rtpbin, 'recv_rtcp_sink_0')
# reply with RTCP stream
rtpbin.link_pads('send_rtcp_src_0', udpsink_rtcpout, 'sink')
# Plus the RTP into the rest of the pipe...
def rtpbin_pad_added(obj, pad):
print "PAD ADDED"
print " obj", obj
print " pad", pad
rtpbin.link(rtph264depay)
rtpbin.connect('pad-added', rtpbin_pad_added)
gst.element_link_many(rtph264depay, q1, ffdec264, autovideosink)
def start():
pipeline.set_state(gst.STATE_PLAYING)
udpsink_rtcpout.set_locked_state(gst.STATE_PLAYING)
print "Started..."
def loop():
print "Running..."
gobject.MainLoop().run()
if __name__ == '__main__':
start()
loop()
| agpl-3.0 | -9,069,301,017,670,709,000 | 33.073529 | 67 | 0.70738 | false |
cloudysunny14/CloudySwitch | cloudyswitch/app/psyco_eventlet.py | 1 | 2308 | """A wait callback to allow psycopg2 cooperation with eventlet.
Use `make_psycopg_green()` to enable eventlet support in Psycopg.
"""
# Copyright (C) 2010 Daniele Varrazzo <[email protected]>
# and licensed under the MIT license:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import psycopg2
from psycopg2 import extensions
from eventlet.hubs import trampoline
LOG = logging.getLogger(__name__)
def make_psycopg_green():
"""Configure Psycopg to be used with eventlet in non-blocking way."""
if not hasattr(extensions, 'set_wait_callback'):
raise ImportError(
"support for coroutines not available in this Psycopg version (%s)"
% psycopg2.__version__)
extensions.set_wait_callback(eventlet_wait_callback)
def eventlet_wait_callback(conn, timeout=-1):
"""A wait callback useful to allow eventlet to work with Psycopg."""
while 1:
state = conn.poll()
if state == extensions.POLL_OK:
break
elif state == extensions.POLL_READ:
trampoline(conn.fileno(), read=True)
elif state == extensions.POLL_WRITE:
trampoline(conn.fileno(), write=True)
else:
raise psycopg2.OperationalError(
"Bad result from poll: %r" % state)
| apache-2.0 | -6,447,168,683,168,695,000 | 42.54717 | 79 | 0.717938 | false |
fpdetective/fpdetective | src/crawler/fp_regex.py | 1 | 2460 | BLUECAVA = 1
INSIDEGRAPH = 2
THREATMETRIX = 3
IOVATION = 4
MAXMIND = 5
ANALYTICSENGINE = 6
COINBASE = 7
SITEBLACKBOX = 8
PERFERENCEMENT = 9
MYFREECAMS = 10
MINDSHARE = 11
AFKMEDIA = 12
CDNNET = 13
ANALYTICSPROS = 14
ANONYMIZER = 15
AAMI = 16
VIRWOX = 17
ISINGLES = 18
BBELEMENTS = 19
PIANOMEDIA = 20
ALIBABA = 21
MERCADOLIBRE = 22
LIGATUS = 23
FINGERPRINTER_REGEX = {'(lookup|ds|collective|clients)\.bluecava.com': BLUECAVA,
'inside-graph\.com/ig\.js': INSIDEGRAPH,
'online-metrix\.net': THREATMETRIX,
'mpsnare\.iesnare\.com': IOVATION,
'(d2fhjc7xo4fbfa.cloudfront.net|maxmind.com).*device.js': MAXMIND,
'analytics-engine\.net/detector/fp\.js': ANALYTICSENGINE,
'sl[0-9]\.analytics-engine\.net/fingerprint': ANALYTICSENGINE,
'web-aupair.net/sites/default/files/fp/fp\.js': ANALYTICSENGINE,
'(coinbase.com|d2o7j92jk8qjiw.cloudfront.net)/assets/application\-[0-9a-z]{32}\.js': COINBASE,
'sbbpg=sbbShell': SITEBLACKBOX,
'tags\.master-perf-tools\.com/V\d+test/tagv[0-9]+.pkmin\.js' : PERFERENCEMENT,
'mfc\d/lib/o-mfccore\.js': MYFREECAMS,
'jslib/pomegranate\.js': MINDSHARE,
'gmyze\.com.*(fingerprint|ax)\.js': AFKMEDIA,
'cdn-net\.com/cc\.js': CDNNET,
'privacytool\.org/AnonymityChecker/js/fontdetect\.js': ANONYMIZER,
'analyticsengine\.s3\.amazonaws\.com/archive/fingerprint\.compiled\.js': ANALYTICSPROS, # taken down. old url was http://dpp750yjcl65g.cloudfront.net/analyticsengine/util/fingerprint.compiled.js
'dscke\.suncorp\.com\.au/datastream-web/resources/js/fp/fontlist-min\.js': AAMI,
'virwox\.com/affiliate_tracker\.js': VIRWOX,
'isingles\.co\.uk/js/fprint/_core\.js': ISINGLES,
'(ibillboard|bbelements).*bbnaut\.swf': BBELEMENTS,
'pianomedia\.eu.*novosense\.swf': PIANOMEDIA,
'ali(pay|baba).*lsa.swf': ALIBABA,
'mercadoli[b|v]re.*dpe-.*swf': MERCADOLIBRE,
'ligatus.com.*fingerprint.*js': LIGATUS,
}
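# Minimal usage sketch (hypothetical script URL; `re` is not imported by the
# original module, so it is imported locally here):
if __name__ == '__main__':
    import re
    url = 'http://ds.bluecava.com/v50/AC/BCAC5.js'
    hits = [fp_id for pattern, fp_id in FINGERPRINTER_REGEX.items()
            if re.search(pattern, url)]
    print(hits)  # -> [1], i.e. BLUECAVA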
| gpl-3.0 | 8,656,498,202,415,188,000 | 47.235294 | 219 | 0.545122 | false |
tu-rbo/differentiable-particle-filters | methods/dpf_kitti.py | 1 | 43029 | import os
import numpy as np
import sonnet as snt
import tensorflow as tf
import matplotlib.pyplot as plt
from utils.data_utils_kitti import wrap_angle, compute_statistics, split_data, make_batch_iterator, make_repeating_batch_iterator, rotation_matrix, load_data_for_stats
from utils.method_utils import atan2, compute_sq_distance
from utils.plotting_utils import plot_maze, show_pause
from datetime import datetime
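# Older TF releases ship no gradient for FloorMod (used by wrap_angle), so a
# pass-through gradient w.r.t. x is registered to keep angle wrapping differentiable.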
if tf.__version__ == '1.1.0-rc1' or tf.__version__ == '1.2.0':
from tensorflow.python.framework import ops
@ops.RegisterGradient("FloorMod")
def _mod_grad(op, grad):
x, y = op.inputs
gz = grad
x_grad = gz
y_grad = None # tf.reduce_mean(-(x // y) * gz, axis=[0], keep_dims=True)[0]
return x_grad, y_grad
class DPF():
def __init__(self, init_with_true_state, learn_odom, use_proposer, propose_ratio, proposer_keep_ratio, min_obs_likelihood, learn_gaussian_mle):
"""
        :param init_with_true_state: initialize the particles at the true state instead of sampling/proposing them
        :param learn_odom: learn the odometry (transition) model instead of applying the actions directly
        :param use_proposer: use a learned particle proposer (ignored when init_with_true_state is set)
        :param propose_ratio: fraction of particles drawn from the proposer at each time step
        :param proposer_keep_ratio: dropout keep probability inside the particle proposer
        :param min_obs_likelihood: lower bound on the estimated observation likelihood
        :param learn_gaussian_mle: model motion noise with a learned Gaussian (mean/std) instead of zero-centered samples
"""
# store hyperparameters which are needed later
self.init_with_true_state = init_with_true_state
self.learn_odom = learn_odom
        self.use_proposer = use_proposer and not init_with_true_state  # only use the proposer if we do not initialize with the true state
self.propose_ratio = propose_ratio if not self.init_with_true_state else 0.0
# define some more parameters and placeholders
self.state_dim = 5
self.action_dim = 3
self.observation_dim = 6
self.placeholders = {'o': tf.placeholder('float32', [None, None, 50, 150, self.observation_dim], 'observations'),
'a': tf.placeholder('float32', [None, None, 3], 'actions'),
's': tf.placeholder('float32', [None, None, 5], 'states'),
'num_particles': tf.placeholder('float32'),
'keep_prob': tf.placeholder_with_default(tf.constant(1.0), []),
'is_training': tf.placeholder_with_default(tf.constant(False), [])
}
self.num_particles_float = self.placeholders['num_particles']
self.num_particles = tf.to_int32(self.num_particles_float)
# build learnable modules
self.build_modules(min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle)
def build_modules(self, min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle):
"""
        :param min_obs_likelihood: lower bound on the estimated observation likelihood
        :param proposer_keep_ratio: dropout keep probability inside the particle proposer
        :param learn_gaussian_mle: whether the motion noise generator outputs Gaussian mean/std parameters
        :return: None
"""
# MEASUREMENT MODEL
# conv net for encoding the image
self.encoder = snt.Sequential([
snt.nets.ConvNet2D([16, 16, 16, 16], [[7, 7], [5, 5], [5, 5], [5, 5]], [[1,1], [1, 2], [1, 2], [2, 2]], [snt.SAME], activate_final=True, name='encoder/convnet'),
snt.BatchFlatten(),
lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
snt.Linear(128, name='encoder/linear'),
tf.nn.relu
])
# observation likelihood estimator that maps states and image encodings to probabilities
self.obs_like_estimator = snt.Sequential([
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(128, name='obs_like_estimator/linear'),
tf.nn.relu,
snt.Linear(1, name='obs_like_estimator/linear'),
tf.nn.sigmoid,
lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
], name='obs_like_estimator')
# motion noise generator used for motion sampling
if learn_gaussian_mle:
self.mo_noise_generator = snt.nets.MLP([32, 32, 4], activate_final=False, name='mo_noise_generator')
else:
self.mo_noise_generator = snt.nets.MLP([32, 32, 2], activate_final=False, name='mo_noise_generator')
# odometry model (if we want to learn it)
if self.learn_odom:
self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim], activate_final=False, name='mo_transition_model')
# particle proposer that maps encodings to particles (if we want to use it)
if self.use_proposer:
self.particle_proposer = snt.Sequential([
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
lambda x: tf.nn.dropout(x, proposer_keep_ratio),
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(128, name='particle_proposer/linear'),
tf.nn.relu,
snt.Linear(4, name='particle_proposer/linear'),
tf.nn.tanh,
])
self.noise_scaler1 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler1', initializer=np.array(0.0, dtype='float32'))))
self.noise_scaler2 = snt.Module(lambda x: x * tf.exp(10 * tf.get_variable('motion_sampler/noise_scaler2', initializer=np.array(0.0, dtype='float32'))))
def custom_build(self, inputs):
"""A custom build method to wrap into a sonnet Module."""
outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
outputs = tf.nn.relu(outputs)
outputs = tf.nn.dropout(outputs, self.placeholders['keep_prob'])
outputs = snt.BatchFlatten()(outputs)
outputs = snt.Linear(128)(outputs)
outputs = tf.nn.relu(outputs)
return outputs
def measurement_update(self, encoding, particles, means, stds):
"""
Compute the likelihood of the encoded observation for each particle.
:param encoding: encoding of the observation
        :param particles: particle states, shape (batch_size, num_particles, state_dim)
        :param means: statistics used to normalize the particle states
        :param stds: statistics used to normalize the particle states
:return: observation likelihood
"""
        # prepare input (normalize particle states and repeat the encoding per particle)
particle_input = self.transform_particles_as_input(particles, means, stds)
encoding_input = tf.tile(encoding[:, tf.newaxis, :], [1, tf.shape(particles)[1], 1])
input = tf.concat([encoding_input, particle_input], axis=-1)
# estimate the likelihood of the encoded observation for each particle, remove last dimension
obs_likelihood = snt.BatchApply(self.obs_like_estimator)(input)[:, :, 0]
return obs_likelihood
def transform_particles_as_input(self, particles, means, stds):
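        # normalize the states and keep only the velocity dimensions (3:5);
        # presumably the absolute pose cannot be judged from a single observation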
return ((particles - means['s']) / stds['s'])[..., 3:5]
def propose_particles(self, encoding, num_particles, state_mins, state_maxs):
duplicated_encoding = tf.tile(encoding[:, tf.newaxis, :], [1, num_particles, 1])
proposed_particles = snt.BatchApply(self.particle_proposer)(duplicated_encoding)
proposed_particles = tf.concat([
proposed_particles[:,:,:1] * (state_maxs[0] - state_mins[0]) / 2.0 + (state_maxs[0] + state_mins[0]) / 2.0,
proposed_particles[:,:,1:2] * (state_maxs[1] - state_mins[1]) / 2.0 + (state_maxs[1] + state_mins[1]) / 2.0,
atan2(proposed_particles[:,:,2:3], proposed_particles[:,:,3:4])], axis=2)
return proposed_particles
def motion_update(self, actions, particles, means, stds, state_step_sizes, learn_gaussian_mle, stop_sampling_gradient=False):
"""
Move particles according to odometry info in actions. Add learned noise.
        :param actions: odometry actions (the Gaussian-MLE variant derives its input from the particles instead)
        :param particles: particle states, shape (batch_size, num_particles, state_dim)
        :param means: statistics used to normalize the states
        :param stds: statistics used to normalize the states
        :param state_step_sizes: per-dimension scale of the state space
        :param learn_gaussian_mle: sample noise from a learned Gaussian (mean/std) instead of zero-centered noise
        :param stop_sampling_gradient: if True, block gradients through the sampled noise
:return: moved particles
"""
# 1. SAMPLE NOISY ACTIONS
        # inter-frame time step in seconds (the KITTI camera runs at roughly 10 Hz)
        time_step = 0.103
if learn_gaussian_mle:
actions = tf.concat([particles[:, :, 3:4] - means['s'][:, :, 3:4], particles[:, :, 4:5] - means['s'][:, :, 4:5]], axis=-1)
# prepare input (normalize actions and repeat per particle)
action_input = actions / stds['s'][:, :, 3:5]
input = action_input
# estimate action noise
delta = snt.BatchApply(self.mo_noise_generator)(input)
delta = tf.concat([delta[:, :, 0:2] * state_step_sizes[3], delta[:, :, 2:4] * state_step_sizes[4]], axis=-1)
if stop_sampling_gradient:
delta = tf.stop_gradient(delta)
action_vel_f = tf.random_normal(tf.shape(particles[:, :, 3:4]), mean = delta[:, :, 0:1], stddev = delta[:, :, 1:2])
action_vel_rot = tf.random_normal(tf.shape(particles[:, :, 4:5]), mean = delta[:, :, 2:3], stddev = delta[:, :, 3:4])
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
new_x = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_y = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
            new_theta = wrap_angle(particles[:, :, 2:3] + particles[:, :, 4:5] * time_step)
new_v = particles[:, :, 3:4] + action_vel_f
new_theta_dot = particles[:, :, 4:5] + action_vel_rot
moved_particles = tf.concat([new_x, new_y, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles, delta
else:
heading = particles[:, :, 2:3]
sin_heading = tf.sin(heading)
cos_heading = tf.cos(heading)
random_input = tf.random_normal(tf.shape(particles[:, :, 3:5]))
noise = snt.BatchApply(self.mo_noise_generator)(random_input)
noise = noise - tf.reduce_mean(noise, axis=1, keep_dims=True)
new_z = particles[:, :, 0:1] + cos_heading * particles[:, :, 3:4] * time_step
new_x = particles[:, :, 1:2] + sin_heading * particles[:, :, 3:4] * time_step
new_theta = wrap_angle(particles[:, :, 2:3] + particles[:, :, 4:5] * time_step)
new_v = particles[:, :, 3:4] + noise[:, :, :1] * state_step_sizes[3]
new_theta_dot = particles[:, :, 4:5] + noise[:, :, 1:] * state_step_sizes[4]
moved_particles = tf.concat([new_z, new_x, new_theta, new_v, new_theta_dot], axis=-1)
return moved_particles
def compile_training_stages(self, sess, batch_iterators, particle_list, particle_probs_list, encodings, means, stds, state_step_sizes, state_mins, state_maxs, learn_gaussian_mle, learning_rate, plot_task):
# TRAINING!
losses = dict()
train_stages = dict()
std = 0.25
# TRAIN ODOMETRY
if self.learn_odom:
# apply model
motion_samples = self.motion_update(self.placeholders['a'][:,0],
self.placeholders['s'][:, :1],
means, stds, state_step_sizes,
stop_sampling_gradient=True)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
losses['motion_mse'] = tf.reduce_mean(sq_distance, name='loss')
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_odom'] = {
'train_op': optimizer.minimize(losses['motion_mse']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['motion_mse'],
'validation_loss': 'motion_mse',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MOTION MODEL
if learn_gaussian_mle:
motion_samples, motion_params = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, 1, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
diff_in_states = self.placeholders['s'][:, 1:2] - self.placeholders['s'][:, :1]
activations_vel_f = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 1] ** 2) * tf.exp(
-(diff_in_states[:, :, 3] - motion_params[:, :, 0]) ** 2 / (2.0 * motion_params[:, :, 1] ** 2))
activations_vel_rot = (1 / 32) / tf.sqrt(2 * np.pi * motion_params[:, :, 3] ** 2) * tf.exp(
-(diff_in_states[:, :, 4] - motion_params[:, :, 2]) ** 2 / (2.0 * motion_params[:, :, 3] ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + (tf.reduce_sum(activations_vel_f, axis=-1, name='loss1') * tf.reduce_sum(activations_vel_rot, axis=-1, name='loss2'))))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
else:
motion_samples = self.motion_update(self.placeholders['a'][:,1],
tf.tile(self.placeholders['s'][:, :1], [1, self.num_particles, 1]),
means, stds, state_step_sizes, learn_gaussian_mle)
# define loss and optimizer
sq_distance = compute_sq_distance(motion_samples, self.placeholders['s'][:, 1:2], state_step_sizes)
activations_sample = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['motion_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations_sample, axis=-1, name='loss')))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_motion_sampling'] = {
'train_op': optimizer.minimize(losses['motion_mle']),
'batch_iterator_names': {'train': 'train2', 'val': 'val2'},
'monitor_losses': ['motion_mle'],
'validation_loss': 'motion_mle',
'plot': lambda e: self.plot_motion_model(sess, next(batch_iterators['val2']), motion_samples, plot_task, state_step_sizes) if e % 1 == 0 else None
}
# TRAIN MEASUREMENT MODEL
# apply model for all pairs of observations and states in that batch
test_particles = tf.tile(self.placeholders['s'][tf.newaxis, :, 0], [self.batch_size, 1, 1])
measurement_model_out = self.measurement_update(encodings[:, 0], test_particles, means, stds)
# define loss (correct -> 1, incorrect -> 0) and optimizer
correct_samples = tf.diag_part(measurement_model_out)
incorrect_samples = measurement_model_out - tf.diag(tf.diag_part(measurement_model_out))
losses['measurement_heuristic'] = tf.reduce_sum(-tf.log(correct_samples)) / tf.cast(self.batch_size, tf.float32) \
+ tf.reduce_sum(-tf.log(1.0 - incorrect_samples)) / tf.cast(self.batch_size * (self.batch_size - 1), tf.float32)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_measurement_model'] = {
'train_op': optimizer.minimize(losses['measurement_heuristic']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['measurement_heuristic'],
'validation_loss': 'measurement_heuristic',
'plot': lambda e: self.plot_measurement_model(sess, batch_iterators['val1'], measurement_model_out) if e % 1 == 0 else None
}
# TRAIN PARTICLE PROPOSER
if self.use_proposer:
# apply model (but only compute gradients until the encoding,
# otherwise we would unlearn it and the observation likelihood wouldn't work anymore)
proposed_particles = self.propose_particles(tf.stop_gradient(encodings[:, 0]), self.num_particles, state_mins, state_maxs)
# define loss and optimizer
std = 0.2
sq_distance = compute_sq_distance(proposed_particles, self.placeholders['s'][:, :1], state_step_sizes)
activations = (1 / self.num_particles_float) / tf.sqrt(2 * np.pi * std ** 2) * tf.exp(
-sq_distance / (2.0 * std ** 2))
losses['proposed_mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=-1)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# put everything together
train_stages['train_particle_proposer'] = {
'train_op': optimizer.minimize(losses['proposed_mle']),
'batch_iterator_names': {'train': 'train1', 'val': 'val1'},
'monitor_losses': ['proposed_mle'],
'validation_loss': 'proposed_mle',
'plot': lambda e: self.plot_particle_proposer(sess, next(batch_iterators['val1']), proposed_particles, plot_task) if e % 10 == 0 else None
}
# END-TO-END TRAINING
# model was already applied further up -> particle_list, particle_probs_list
# define losses and optimizer
# first loss (which is being optimized)
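        # Gaussian-kernel MLE: place a Gaussian of width particle_std on every
        # particle, weight it by the particle probability, and maximize the
        # density of the true state under the resulting mixture.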
sq_distance = compute_sq_distance(particle_list[:, :, :, 3:5], self.placeholders['s'][:, :, tf.newaxis, 3:5], state_step_sizes[3:5])
activations = particle_probs_list[:, :] / tf.sqrt(2 * np.pi * self.particle_std ** 2) * tf.exp(
-sq_distance / (2.0 * self.particle_std ** 2))
losses['mle'] = tf.reduce_mean(-tf.log(1e-16 + tf.reduce_sum(activations, axis=2, name='loss')))
# second loss (which we will monitor during execution)
pred = self.particles_to_state(particle_list, particle_probs_list)
sq_error = compute_sq_distance(pred[:, -1, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
sq_dist = compute_sq_distance(self.placeholders['s'][:, 0, 0:2], self.placeholders['s'][:, -1, 0:2], [1., 1.])
losses['m/m'] = tf.reduce_mean(sq_error**0.5/sq_dist**0.5)
sq_error = compute_sq_distance(pred[:, -1, 2:3], self.placeholders['s'][:, -1, 2:3], [np.pi/180.0])
losses['deg/m'] = tf.reduce_mean(sq_error ** 0.5 / sq_dist ** 0.5)
# optimizer
optimizer = tf.train.AdamOptimizer(learning_rate)
# put everything together
train_stages['train_e2e'] = {
'train_op': optimizer.minimize(losses['mle']),
'batch_iterator_names': {'train': 'train', 'val': 'val'},
'monitor_losses': ['m/m', 'deg/m', 'mle'],
'validation_loss': 'deg/m',
'plot': lambda e: self.plot_particle_filter(sess, next(batch_iterators['val_ex']), particle_list,
particle_probs_list, state_step_sizes, plot_task) if e % 1 == 0 else None
}
return losses, train_stages
def load(self, sess, model_path, model_file='best_validation', statistics_file='statistics.npz', connect_and_initialize=True, modules=('encoder', 'mo_noise_generator', 'mo_transition_model', 'obs_like_estimator', 'particle_proposer')):
if type(modules) not in [type(list()), type(tuple())]:
raise Exception('modules must be a list or tuple, not a ' + str(type(modules)))
# build the tensorflow graph
if connect_and_initialize:
# load training data statistics (which are needed to build the tf graph)
statistics = dict(np.load(os.path.join(model_path, statistics_file)))
for key in statistics.keys():
if statistics[key].shape == ():
statistics[key] = statistics[key].item() # convert 0d array of dictionary back to a normal dictionary
# connect all modules into the particle filter
self.connect_modules(**statistics)
init = tf.global_variables_initializer()
sess.run(init)
# load variables
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
vars_to_load = []
loaded_modules = set()
for v in all_vars:
for m in modules:
if m in v.name:
vars_to_load.append(v)
loaded_modules.add(m)
        print('Loading modules: ' + ', '.join(sorted(loaded_modules)))
        saver = tf.train.Saver(vars_to_load)
saver.restore(sess, os.path.join(model_path, model_file))
def fit(self, sess, data, model_path, train_individually, train_e2e, split_ratio, seq_len, batch_size, epoch_length, num_epochs, patience, learning_rate, dropout_keep_ratio, num_particles, particle_std, learn_gaussian_mle, plot_task=None, plot=False):
if plot:
plt.ion()
self.particle_std = particle_std
mean_loss_for_plot = np.zeros((1,))
means, stds, state_step_sizes, state_mins, state_maxs = compute_statistics(data)
data = split_data(data, ratio=split_ratio)
epoch_lengths = {'train': epoch_length, 'val': epoch_length*2}
batch_iterators = {'train': make_batch_iterator(data['train'], seq_len=seq_len, batch_size=batch_size),
'val': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=seq_len),
'train_ex': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=seq_len),
'val_ex': make_batch_iterator(data['val'], batch_size=batch_size, seq_len=seq_len),
'train1': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=1),
'train2': make_batch_iterator(data['train'], batch_size=batch_size, seq_len=2),
'val1': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=1),
'val2': make_repeating_batch_iterator(data['val'], epoch_lengths['val'], batch_size=batch_size, seq_len=2),
}
# build the tensorflow graph by connecting all modules in the particles filter
particles, particle_probs, encodings, particle_list, particle_probs_list = self.connect_modules(means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle)
# define losses and train stages for different ways of training (e.g. training individual models and e2e training)
losses, train_stages = self.compile_training_stages(sess, batch_iterators, particle_list, particle_probs_list,
encodings, means, stds, state_step_sizes, state_mins,
state_maxs, learn_gaussian_mle, learning_rate, plot_task)
# initialize variables
init = tf.global_variables_initializer()
sess.run(init)
# save statistics and prepare saving variables
if not os.path.exists(model_path):
os.makedirs(model_path)
np.savez(os.path.join(model_path, 'statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
saver = tf.train.Saver()
save_path = os.path.join(model_path, 'best_validation')
# define the training curriculum
curriculum = []
if train_individually:
if self.learn_odom:
curriculum += ['train_odom']
curriculum += ['train_measurement_model']
curriculum += ['train_motion_sampling']
if self.use_proposer:
curriculum += ['train_particle_proposer']
if train_e2e:
curriculum += ['train_e2e']
# split data for early stopping
data_keys = ['train']
if split_ratio < 1.0:
data_keys.append('val')
# define log dict
log = {c: {dk: {lk: {'mean': [], 'se': []} for lk in train_stages[c]['monitor_losses']} for dk in data_keys} for c in curriculum}
# go through curriculum
for c in curriculum:
stage = train_stages[c]
best_val_loss = np.inf
best_epoch = 0
epoch = 0
if c == 'train_e2e':
saver.save(sess, os.path.join(model_path, 'before_e2e/best_validation'))
np.savez(os.path.join(model_path, 'before_e2e/statistics'), means=means, stds=stds, state_step_sizes=state_step_sizes,
state_mins=state_mins, state_maxs=state_maxs)
while epoch < num_epochs and epoch - best_epoch < patience:
# training
for dk in data_keys:
# don't train in the first epoch, just evaluate the initial parameters
if dk == 'train' and epoch == 0:
continue
# set up loss lists which will be filled during the epoch
loss_lists = {lk: [] for lk in stage['monitor_losses']}
for e in range(epoch_lengths[dk]):
# t0 = time.time()
# pick a batch from the right iterator
batch = next(batch_iterators[stage['batch_iterator_names'][dk]])
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: num_particles},
}
if dk == 'train':
input_dict[self.placeholders['keep_prob']] = dropout_keep_ratio
input_dict[self.placeholders['is_training']] = True
monitor_losses = {l: losses[l] for l in stage['monitor_losses']}
if dk == 'train':
s_losses, _ = sess.run([monitor_losses, stage['train_op']], input_dict)
else:
s_losses = sess.run(monitor_losses, input_dict)
for lk in stage['monitor_losses']:
loss_lists[lk].append(s_losses[lk])
# after each epoch, compute and log statistics
for lk in stage['monitor_losses']:
log[c][dk][lk]['mean'].append(np.mean(loss_lists[lk]))
log[c][dk][lk]['se'].append(np.std(loss_lists[lk], ddof=1) / np.sqrt(len(loss_lists[lk])))
# check whether the current model is better than all previous models
if 'val' in data_keys:
current_val_loss = log[c]['val'][stage['validation_loss']]['mean'][-1]
mean_loss_for_plot = np.append(mean_loss_for_plot,current_val_loss)
if current_val_loss < best_val_loss:
best_val_loss = current_val_loss
best_epoch = epoch
# save current model
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
else:
txt = 'epoch {:>3} == '.format(epoch)
else:
best_epoch = epoch
saver.save(sess, save_path)
txt = 'epoch {:>3} >> '.format(epoch)
# after going through all data sets, do a print out of the current result
for lk in stage['monitor_losses']:
txt += '{}: '.format(lk)
for dk in data_keys:
if len(log[c][dk][lk]['mean']) > 0:
txt += '{:.2f}+-{:.2f}/'.format(log[c][dk][lk]['mean'][-1], log[c][dk][lk]['se'][-1])
txt = txt[:-1] + ' -- '
print(txt)
if plot:
stage['plot'](epoch)
epoch += 1
# after running out of patience, restore the model with lowest validation loss
saver.restore(sess, save_path)
return log
def predict(self, sess, batch, return_particles=False, **kwargs):
# define input dict, use the first state only if we do tracking
input_dict = {self.placeholders['o']: batch['o'],
self.placeholders['a']: batch['a'],
self.placeholders['num_particles']: 100}
if self.init_with_true_state:
input_dict[self.placeholders['s']] = batch['s'][:, :1]
if return_particles:
return sess.run([self.pred_states, self.particle_list, self.particle_probs_list], input_dict)
else:
return sess.run(self.pred_states, input_dict)
def connect_modules(self, means, stds, state_mins, state_maxs, state_step_sizes, learn_gaussian_mle=False):
# get shapes
self.batch_size = tf.shape(self.placeholders['o'])[0]
self.seq_len = tf.shape(self.placeholders['o'])[1]
# we use the static shape here because we need it to build the graph
self.action_dim = self.placeholders['a'].get_shape()[-1].value
encodings = snt.BatchApply(self.encoder)((self.placeholders['o'] - means['o']) / stds['o'])
# initialize particles
if self.init_with_true_state:
# tracking with known initial state
initial_particles = tf.tile(self.placeholders['s'][:, 0, tf.newaxis, :], [1, self.num_particles, 1])
else:
# global localization
if self.use_proposer:
# propose particles from observations
initial_particles = self.propose_particles(encodings[:, 0], self.num_particles, state_mins, state_maxs)
else:
# sample particles randomly
initial_particles = tf.concat(
[tf.random_uniform([self.batch_size, self.num_particles, 1], state_mins[d], state_maxs[d]) for d in
range(self.state_dim)], axis=-1, name='particles')
initial_particle_probs = tf.ones([self.batch_size, self.num_particles],
name='particle_probs') / self.num_particles_float
# assumes that samples has the correct size
def permute_batch(x, samples):
# get shapes
batch_size = tf.shape(x)[0]
num_particles = tf.shape(x)[1]
sample_size = tf.shape(samples)[1]
# compute 1D indices into the 2D array
idx = samples + num_particles * tf.tile(
tf.reshape(tf.range(batch_size), [batch_size, 1]),
[1, sample_size])
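            # worked example (hypothetical values): with batch_size=2,
            # num_particles=3 and samples=[[0, 2], [1, 1]], idx becomes
            # [[0, 2], [4, 4]]; row 1 gets the offset 1 * 3 added so it
            # indexes into its own block of the flattened array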
# index using the 1D indices and reshape again
result = tf.gather(tf.reshape(x, [batch_size * num_particles, -1]), idx)
result = tf.reshape(result, tf.shape(x[:,:sample_size]))
return result
def loop(particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i):
num_proposed_float = tf.round((self.propose_ratio ** tf.cast(i, tf.float32)) * self.num_particles_float)
num_proposed = tf.cast(num_proposed_float, tf.int32)
num_resampled_float = self.num_particles_float - num_proposed_float
num_resampled = tf.cast(num_resampled_float, tf.int32)
if self.propose_ratio < 1.0:
# resampling
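                # systematic (low-variance) resampling: evenly spaced markers
                # on [0, 1) share one random offset per batch row, and the
                # argmax below picks the first particle whose cumulative
                # weight exceeds each marker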
basic_markers = tf.linspace(0.0, (num_resampled_float - 1.0) / num_resampled_float, num_resampled)
random_offset = tf.random_uniform([self.batch_size], 0.0, 1.0 / num_resampled_float)
markers = random_offset[:, None] + basic_markers[None, :] # shape: batch_size x num_resampled
cum_probs = tf.cumsum(particle_probs, axis=1)
marker_matching = markers[:, :, None] < cum_probs[:, None, :] # shape: batch_size x num_resampled x num_particles
                samples = tf.cast(tf.argmax(tf.cast(marker_matching, 'int32'), axis=2), 'int32')
standard_particles = permute_batch(particles, samples)
standard_particle_probs = tf.ones([self.batch_size, num_resampled])
standard_particles = tf.stop_gradient(standard_particles)
standard_particle_probs = tf.stop_gradient(standard_particle_probs)
# motion update
if learn_gaussian_mle:
standard_particles, _ = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
else:
standard_particles = self.motion_update(self.placeholders['a'][:, i], standard_particles, means, stds, state_step_sizes, learn_gaussian_mle)
# measurement update
standard_particle_probs *= self.measurement_update(encodings[:, i], standard_particles, means, stds)
if self.propose_ratio > 0.0:
# proposed particles
proposed_particles = self.propose_particles(encodings[:, i], num_proposed, state_mins, state_maxs)
proposed_particle_probs = tf.ones([self.batch_size, num_proposed])
# NORMALIZE AND COMBINE PARTICLES
if self.propose_ratio == 1.0:
particles = proposed_particles
particle_probs = proposed_particle_probs
elif self.propose_ratio == 0.0:
particles = standard_particles
particle_probs = standard_particle_probs
else:
standard_particle_probs *= (num_resampled_float / self.num_particles_float) / tf.reduce_sum(standard_particle_probs, axis=1, keep_dims=True)
proposed_particle_probs *= (num_proposed_float / self.num_particles_float) / tf.reduce_sum(proposed_particle_probs, axis=1, keep_dims=True)
particles = tf.concat([standard_particles, proposed_particles], axis=1)
particle_probs = tf.concat([standard_particle_probs, proposed_particle_probs], axis=1)
# NORMALIZE PROBABILITIES
particle_probs /= tf.reduce_sum(particle_probs, axis=1, keep_dims=True)
particle_list = tf.concat([particle_list, particles[:, tf.newaxis]], axis=1)
particle_probs_list = tf.concat([particle_probs_list, particle_probs[:, tf.newaxis]], axis=1)
return particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i + 1
# reshapes and sets the first shape sizes to None (which is necessary to keep the shape consistent in while loop)
particle_list = tf.reshape(initial_particles,
shape=[self.batch_size, -1, self.num_particles, self.state_dim])
particle_probs_list = tf.reshape(initial_particle_probs, shape=[self.batch_size, -1, self.num_particles])
additional_probs_list = tf.reshape(tf.ones([self.batch_size, self.num_particles, 4]), shape=[self.batch_size, -1, self.num_particles, 4])
# run the filtering process
particles, particle_probs, particle_list, particle_probs_list, additional_probs_list, i = tf.while_loop(
lambda *x: x[-1] < self.seq_len, loop,
[initial_particles, initial_particle_probs, particle_list, particle_probs_list, additional_probs_list,
tf.constant(1, dtype='int32')], name='loop')
# compute mean of particles
self.pred_states = self.particles_to_state(particle_list, particle_probs_list)
self.particle_list = particle_list
self.particle_probs_list = particle_probs_list
return particles, particle_probs, encodings, particle_list, particle_probs_list
def particles_to_state(self, particle_list, particle_probs_list):
mean_position = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, :2], axis=2)
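        # the heading is averaged on the unit circle (circular mean): sum the
        # weighted cos/sin components, then convert back with the project's
        # atan2 helper (argument order follows that helper's signature)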
mean_orientation = atan2(
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.cos(particle_list[:, :, :, 2:3]), axis=2),
tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * tf.sin(particle_list[:, :, :, 2:3]), axis=2))
mean_velocity = tf.reduce_sum(particle_probs_list[:, :, :, tf.newaxis] * particle_list[:, :, :, 3:5], axis=2)
return tf.concat([mean_position, mean_orientation, mean_velocity], axis=2)
def plot_motion_model(self, sess, batch, motion_samples, task, state_step_sizes):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_motion_samples = sess.run(motion_samples, input_dict)
plt.figure('Motion Model')
plt.gca().clear()
for i in range(min(len(s_motion_samples), 10)):
plt.scatter(s_motion_samples[i, :, 3] / state_step_sizes[3], s_motion_samples[i, :, 4] / state_step_sizes[4], color='blue', s=1)
plt.scatter(batch['s'][i, 0, 3] / state_step_sizes[3], batch['s'][i, 0, 4] / state_step_sizes[4], color='black', s=1)
plt.scatter(batch['s'][i, 1, 3] / state_step_sizes[3], batch['s'][i, 1, 4] / state_step_sizes[4], color='red', s=3)
plt.plot(batch['s'][i, :2, 3] / state_step_sizes[3], batch['s'][i, :2, 4] / state_step_sizes[4], color='black')
plt.xlim([0, 200])
plt.ylim([-50, 50])
plt.xlabel('translational vel')
plt.ylabel('angular vel')
plt.gca().set_aspect('equal')
plt.pause(0.01)
def plot_measurement_model(self, sess, batch_iterator, measurement_model_out):
batch = next(batch_iterator)
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_measurement_model_out = sess.run([measurement_model_out], input_dict)
plt.figure('Measurement Model Output')
plt.gca().clear()
plt.imshow(s_measurement_model_out[0], interpolation="nearest", cmap="viridis_r", vmin=0.0, vmax=1.0)
plt.figure('Measurement Model Input')
plt.clf()
plt.scatter(batch['s'][:1, 0, 3], batch['s'][:1, 0, 4], marker='x', c=s_measurement_model_out[0][0,:1], vmin=0, vmax=1.0, cmap='viridis_r')
plt.scatter(batch['s'][1:, 0, 3], batch['s'][1:, 0, 4], marker='o', c=s_measurement_model_out[0][0,1:], vmin=0, vmax=1.0, cmap='viridis_r')
plt.xlabel('x_dot')
plt.ylabel('theta_dot')
plt.pause(0.01)
def plot_particle_proposer(self, sess, batch, proposed_particles, task):
# define the inputs and train/run the model
input_dict = {**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 100},
}
s_samples = sess.run(proposed_particles, input_dict)
plt.figure('Particle Proposer')
plt.gca().clear()
plot_maze(task)
for i in range(min(len(s_samples), 10)):
color = np.random.uniform(0.0, 1.0, 3)
plt.quiver(s_samples[i, :, 0], s_samples[i, :, 1], np.cos(s_samples[i, :, 2]), np.sin(s_samples[i, :, 2]), color=color, width=0.001, scale=100)
plt.quiver(batch['s'][i, 0, 0], batch['s'][i, 0, 1], np.cos(batch['s'][i, 0, 2]), np.sin(batch['s'][i, 0, 2]), color=color, scale=50, width=0.003)
plt.pause(0.01)
def plot_particle_filter(self, sess, batch, particle_list,
particle_probs_list, state_step_sizes, task):
s_states, s_particle_list, s_particle_probs_list, \
= sess.run([self.placeholders['s'], particle_list,
particle_probs_list], #self.noise_scaler1(1.0), self.noise_scaler2(2.0)],
{**{self.placeholders[key]: batch[key] for key in 'osa'},
**{self.placeholders['num_particles']: 20},
})
# print('learned motion noise factors {:.2f}/{:.2f}'.format(n1, n2))
num_steps = s_particle_list.shape[1]
for s in range(3):
plt.figure('particle_evolution, example {}'.format(s))
plt.clf()
for d in range(5):
plt.subplot(3, 2, [1, 3, 5, 2, 4][d])
for i in range(num_steps):
plt.scatter(i * np.ones_like(s_particle_list[s, i, :, d]),
s_particle_list[s, i, :, d] / (1 if s == 0 else state_step_sizes[d]),
c=s_particle_probs_list[s, i, :], cmap='viridis_r', marker='o', s=6, alpha=0.5,
linewidths=0.05,
vmin=0.0,
vmax=0.1)
current_state = batch['s'][s, i, d] / (1 if s == 0 else state_step_sizes[d])
plt.plot([i], [current_state], 'o', markerfacecolor='None', markeredgecolor='k',
markersize=2.5)
plt.xlabel('Time')
plt.ylabel('State {}'.format(d))
show_pause(pause=0.01)
| mit | 6,549,029,999,007,857,000 | 50.16409 | 255 | 0.560738 | false |
nextstrain/auspice | docs/conf.py | 1 | 2559 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Auspice'
copyright = '2020, James Hadfield, Trevor Bedford and Richard Neher'
author = 'James Hadfield, Trevor Bedford and Richard Neher'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'recommonmark',
'sphinx.ext.intersphinx',
'sphinx_markdown_tables',
'sphinxarg.ext',
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md',
'narratives',
'contributing',
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'nextstrain-sphinx-theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme_options = {
'logo_only': False, # if True, don't display project name at top of the sidebar
'collapse_navigation': False, # if True, no [+] icons in sidebar
'titles_only': True, # if True, page subheadings not included in nav
}
# -- Cross-project references ------------------------------------------------
intersphinx_mapping = {
'docs.nextstrain.org': ('https://docs.nextstrain.org/en/latest/', None),
}
| agpl-3.0 | -8,455,335,102,820,578,000 | 34.054795 | 83 | 0.647519 | false |
westurner/pyglobalgoals | notebooks/globalgoals-pyglobalgoals.py.py | 1 | 16352 |
# coding: utf-8
# # @TheGlobalGoals for Sustainable Development
# ## Background
#
# * Homepage: **http://www.globalgoals.org/**
# - Twitter: https://twitter.com/TheGlobalGoals
# - Instagram: https://instagram.com/TheGlobalGoals/
# - Facebook: https://www.facebook.com/globalgoals.org
# - YouTube: https://www.youtube.com/channel/UCRfuAYy7MesZmgOi1Ezy0ng/
# - Hashtag: **#GlobalGoals**
# - https://twitter.com/hashtag/GlobalGoals
# - https://instagram.com/explore/tags/GlobalGoals/
# - https://www.facebook.com/hashtag/GlobalGoals
# - Hashtag: #TheGlobalGoals
# - https://twitter.com/hashtag/TheGlobalGoals
# - https://instagram.com/explore/tags/TheGlobalGoals/
# - https://www.facebook.com/hashtag/TheGlobalGoals
#
#
# ### pyglobalgoals
#
# * Homepage: https://github.com/westurner/pyglobalgoals
# * Src: https://github.com/westurner/pyglobalgoals
# * Download: https://github.com/westurner/pyglobalgoals/releases
#
# ### Objectives
#
# * [x] ENH: Read and parse TheGlobalGoals from globalgoals.org
# * [x] ENH: Download (HTTP GET) each GlobalGoal tile image to ``./notebooks/data/images/``
# * [-] ENH: Generate e.g. tweets for each GlobalGoal (e.g. **##gg17** / **##GG17**)
# * [x] ENH: Save TheGlobalGoals to a JSON-LD document
# * [-] ENH: Save TheGlobalGoals with Schema.org RDF vocabulary (as JSON-LD)
# * [-] ENH: Save TheGlobalGoals as ReStructuredText with headings and images
# * [-] ENH: Save TheGlobalGoals as Markdown with headings and images
# * [-] ENH: Save TheGlobalGoals as RDFa with headings and images
# * [ ] ENH: Save TheGlobalGoals as RDFa with images like http://globalgoals.org/
# * [-] DOC: Add narrative documentation where necessary
# * [-] REF: Refactor and extract methods from ``./notebooks/`` to ``./pyglobalgoals/``
#
# ## Implementation
#
# * Python package: [**pyglobalgoals**](#pyglobalgoals)
#
# * Jupyter notebook: **``./notebooks/globalgoals-pyglobalgoals.py.ipynb``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/globalgoals-pyglobalgoals.py.py
# * Src: https://github.com/westurner/pyglobalgoals/blob/develop/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.1.2/notebooks/globalgoals-pyglobalgoals.py.ipynb
# * Src: https://github.com/westurner/pyglobalgoals/blob/v0.2.1/notebooks/globalgoals-pyglobalgoals.py.ipynb
#
# * [x] Download HTML with requests
# * [x] Parse HTML with beautifulsoup
# * [x] Generate JSON[-LD] with ``collections.OrderedDict``
# * [-] REF: Functional methods -> more formal type model -> ``pyglobalgoals.<...>``
#
#
# * [JSON-LD](#JSONLD) document: **``./notebooks/data/globalgoals.jsonld``**
# * Src: https://github.com/westurner/pyglobalgoals/blob/master/notebooks/data/globalgoals.jsonld
#
#
# ### JSON-LD
#
# * Wikipedia: https://en.wikipedia.org/wiki/JSON-LD
# * Homepage: http://json-ld.org/
# * Docs: http://json-ld.org/playground/
# * Hashtag: #JSONLD
#
# ### RDFa
#
# * Wikipedia: https://en.wikipedia.org/wiki/RDFa
# * Standard: http://www.w3.org/TR/rdfa-core/
# * Docs: http://www.w3.org/TR/rdfa-primer/
# * Hashtag: #RDFa
# In[1]:
#!conda install -y beautiful-soup docutils jinja2 requests
get_ipython().system(u"pip install -U beautifulsoup4 jinja2 'requests<2.8' requests-cache version-information # tweepy")
import bs4
import jinja2
import requests
import requests_cache
from pprint import pprint as pp
requests_cache.install_cache('pyglobalgoals_cache')
#!pip install -U version_information
get_ipython().magic(u'load_ext version_information')
get_ipython().magic(u'version_information jupyter, bs4, jinja2, requests, requests_cache, version_information')
# In[2]:
url = "http://www.globalgoals.org/"
req = requests.get(url)
#print(req)
#print(sorted(dir(req)))
#req.<TAB>
#req??<[Ctrl-]Enter>
if not req.ok:
raise Exception(req)
content = req.content
print(content[:20])
# In[ ]:
# In[3]:
bs = bs4.BeautifulSoup(req.content)
print(bs.prettify())
# In[4]:
tiles = bs.find_all(class_='goal-tile-wrapper')
pp(tiles)
# In[5]:
tile = tiles[0]
print(tile)
# In[6]:
link = tile.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
example = {'name': img_title, 'img_src': img_src, 'href': link_href}
print(example)
# In[7]:
import collections
def get_data_from_goal_tile_wrapper_div(node, n=None):
link = node.findNext('a')
img = link.findNext('img')
img_title = img['alt'][:-5]
img_src = img['src']
link_href = link['href']
output = collections.OrderedDict({'@type': 'un:GlobalGoal'})
if n:
output['n'] = n
output['name'] = img_title
output['image'] = img_src
output['url'] = link_href
return output
def get_goal_tile_data(bs):
for i, tile in enumerate(bs.find_all(class_='goal-tile-wrapper'), 1):
yield get_data_from_goal_tile_wrapper_div(tile, n=i)
tiles = list(get_goal_tile_data(bs))
import json
print(json.dumps(tiles, indent=2))
goal_tiles = tiles[:-1]
# In[ ]:
# In[8]:
import codecs
from path import Path
def build_default_context():
context = collections.OrderedDict()
# context["dc"] = "http://purl.org/dc/elements/1.1/"
context["schema"] = "http://schema.org/"
# context["xsd"] = "http://www.w3.org/2001/XMLSchema#"
# context["ex"] = "http://example.org/vocab#"
# context["ex:contains"] = {
# "@type": "@id"
# }
# default attrs (alternative: prefix each with schema:)
# schema.org/Thing == schema:Thing (!= schema:thing)
context["name"] = "http://schema.org/name"
context["image"] = {
"@type": "@id",
"@id": "http://schema.org/image"
}
context["url"] = {
"@type": "@id",
"@id":"http://schema.org/url"
}
context["description"] = {
"@type": "http://schema.org/Text",
"@id": "http://schema.org/description"
}
return context
DEFAULT_CONTEXT = build_default_context()
def goal_tiles_to_jsonld(nodes, context=None, default_context=DEFAULT_CONTEXT):
data = collections.OrderedDict()
if context is None and default_context is not None:
data['@context'] = build_default_context()
elif context:
data['@context'] = context
elif default_context:
data['@context'] = default_context
data['@graph'] = nodes
return data
DATA_DIR = Path('.') / 'data'
#DATA_DIR = Path(__file__).dirname
#DATA_DIR = determine_path_to(current_notebook) # PWD initially defaults to nb.CWD
DATA_DIR.makedirs_p()
GLOBAL_GOALS_JSONLD_PATH = DATA_DIR / 'globalgoals.jsonld'
def write_global_goals_jsonld(goal_tiles, path=GLOBAL_GOALS_JSONLD_PATH):
goal_tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
with codecs.open(path, 'w', 'utf8') as fileobj:
json.dump(goal_tiles_jsonld, fileobj, indent=2)
def read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH, prettyprint=True):
with codecs.open(path, 'r', 'utf8') as fileobj:
global_goals_dict = json.load(fileobj,
object_pairs_hook=collections.OrderedDict)
return global_goals_dict
def print_json_dumps(global_goals_dict, indent=2):
print(json.dumps(global_goals_dict, indent=indent))
write_global_goals_jsonld(goal_tiles)
global_goals_dict = read_global_goals_jsonld(path=GLOBAL_GOALS_JSONLD_PATH)
assert global_goals_dict == goal_tiles_to_jsonld(goal_tiles)
print_json_dumps(global_goals_dict)
# In[9]:
def build_tweet_for_goal_tile(node):
return '##gg{n} {name} {url} {image} @TheGlobalGoals #GlobalGoals'.format(**node)
tweets = list(build_tweet_for_goal_tile(tile) for tile in goal_tiles)
tweets
# In[10]:
for node in goal_tiles:
img_basename = node['image'].split('/')[-1]
node['image_basename'] = img_basename
node['tweet_txt'] = build_tweet_for_goal_tile(node)
print(json.dumps(goal_tiles, indent=2))
# In[11]:
#!conda install -y pycurl
try:
import pycurl
except ImportError as e:
import warnings
warnings.warn(unicode(e))
def pycurl_download_file(url, dest_path, follow_redirects=True):
with open(dest_path, 'wb') as f:
c = pycurl.Curl()
c.setopt(c.URL, url)
c.setopt(c.WRITEDATA, f)
if follow_redirects:
c.setopt(c.FOLLOWLOCATION, True)
c.perform()
c.close()
return (url, dest_path)
# In[12]:
import requests
def requests_download_file(url, dest_path, **kwargs):
local_filename = url.split('/')[-1]
# NOTE the stream=True parameter
r = requests.get(url, stream=True)
with open(dest_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return (url, dest_path)
# In[13]:
import urllib
def urllib_urlretrieve_download_file(url, dest_path):
"""
* https://docs.python.org/2/library/urllib.html#urllib.urlretrieve
"""
    (filename, headers) = urllib.urlretrieve(url, dest_path)
return (url, filename)
# In[14]:
def deduplicate_on_attr(nodes, attr='image_basename'):
attrindex = collections.OrderedDict()
for node in nodes:
attrindex.setdefault(node[attr], [])
attrindex[node[attr]].append(node)
return attrindex
def check_for_key_collisions(dict_of_lists):
for name, _nodes in dict_of_lists.items():
if len(_nodes) > 1:
            raise Exception('duplicate filenames: %r %r' % (name, _nodes))
attrindex = deduplicate_on_attr(goal_tiles, attr='image_basename')
check_for_key_collisions(attrindex)
#
IMG_DIR = DATA_DIR / 'images'
IMG_DIR.makedirs_p()
def download_goal_tile_images(nodes, img_path):
for node in nodes:
dest_path = img_path / node['image_basename']
source_url = node['image']
(url, dest) = requests_download_file(source_url, dest_path)
node['image_path'] = dest
print((node['n'], node['name']))
print((node['image_path']))
# time.sleep(1) # see: requests_cache
download_goal_tile_images(goal_tiles, IMG_DIR)
tiles_jsonld = goal_tiles_to_jsonld(goal_tiles)
print(json.dumps(tiles_jsonld, indent=2))
# In[15]:
#import jupyter.display as display
import IPython.display as display
display.Image(goal_tiles[0]['image_path'])
# In[16]:
import IPython.display
for tile in goal_tiles:
x = IPython.display.Image(tile['image_path'])
x
# In[17]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Image(tile['image_path'])
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[18]:
import string
print(string.punctuation)
NOT_URI_CHARS = dict.fromkeys(string.punctuation + string.digits)
NOT_URI_CHARS.pop('-')
NOT_URI_CHARS.pop('_')
def _slugify(txt):
"""an ~approximate slugify function for human-readable URI #fragments"""
txt = txt.strip().lower()
chars = (
(c if c != ' ' else '-') for c in txt if
c not in NOT_URI_CHARS)
return u''.join(chars)
def _slugify_single_dash(txt):
"""
* unlike docutils, this function does not strip stopwords like 'and' and 'or'
TODO: locate this method in docutils
"""
def _one_dash_only(txt):
count = 0
for char in txt:
if char == '-':
count += 1
else:
if count:
yield '-'
yield char
count = 0
return u''.join(_one_dash_only(_slugify(txt)))
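# e.g. (illustrative) _slugify_single_dash(u'Good Health & Well-Being')
# returns u'good-health-well-being'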
for node in goal_tiles:
node['name_numbered'] = "%d. %s" % (node['n'], node['name'])
node['slug_rst'] = _slugify_single_dash(node['name'])
node['slug_md'] = _slugify_single_dash(node['name'])
print_json_dumps(goal_tiles)
# In[19]:
import IPython.display
def display_goal_images():
for tile in goal_tiles:
yield IPython.display.Markdown("## %s" % tile['name_numbered'])
yield IPython.display.Image(tile['image_path'])
yield IPython.display.Markdown(tile['tweet_txt'].replace('##', '\##'))
x = list(display_goal_images())
#pp(x)
IPython.display.display(*x)
# In[20]:
TMPL_RST = """
The Global Goals
******************
.. contents::
{% for node in nodes %}
{{ node['name_numbered'] }}
======================================================
| {{ node['url'] }}
.. image:: {{ node['image'] }}{# node['image_path'] #}
:target: {{ node['url'] }}
:alt: {{ node['name'] }}
..
{{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_rst = jinja2.Template(TMPL_RST)
output_rst = tmpl_rst.render(nodes=goal_tiles)
print(output_rst)
# In[21]:
output_rst_path = DATA_DIR / 'globalgoals.rst'
with codecs.open(output_rst_path, 'w', encoding='utf-8') as f:
f.write(output_rst)
print("# wrote goals to %r" % output_rst_path)
# In[22]:
import docutils.core
output_rst_html = docutils.core.publish_string(output_rst, writer_name='html')
print(bs4.BeautifulSoup(output_rst_html).find(id='the-global-goals'))
# In[23]:
IPython.display.HTML(output_rst_html)
# In[24]:
TMPL_MD = """
# The Global Goals
**Contents:**
{% for node in nodes %}
* [{{ node['name_numbered'] }}](#{{ node['slug_md'] }})
{%- endfor %}
{% for node in nodes %}
## {{ node['name_numbered'] }}
{{ node['url'] }}
[![{{node['name_numbered']}}]({{ node['image'] }})]({{ node['url'] }})
> {{ node['tweet_txt'] }}
{% endfor %}
"""
tmpl_md = jinja2.Template(TMPL_MD)
output_markdown = tmpl_md.render(nodes=goal_tiles)
print(output_markdown)
# In[25]:
output_md_path = DATA_DIR / 'globalgoals.md'
with codecs.open(output_md_path, 'w', encoding='utf-8') as f:
f.write(output_markdown)
print("# wrote goals to %r" % output_md_path)
# In[26]:
IPython.display.Markdown(output_markdown)
# In[27]:
context = dict(nodes=goal_tiles)
# In[28]:
TMPL_HTML = """
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{% for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile">
<h2><a name="{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
"""
tmpl_html = jinja2.Template(TMPL_HTML)
output_html = tmpl_html.render(**context)
print(output_html)
# In[29]:
output_html_path = DATA_DIR / 'globalgoals.html'
with codecs.open(output_html_path, 'w', encoding='utf-8') as f:
f.write(output_html)
print("# wrote goals to %r" % output_html_path)
# In[30]:
IPython.display.HTML(output_html)
# In[31]:
import jinja2
# TODO: prefix un:
TMPL_RDFA_HTML5 = ("""
<div prefix="schema: http://schema.org/
un: http://schema.un.org/#">
<h1>The Global Goals</h1>
<h2>Contents:</h2>
{%- for node in nodes %}
<li><a href="#{{node.slug_md}}">{{node.name_numbered}}</a></li>
{%- endfor %}
{% for node in nodes %}
<div class="goal-tile" resource="{{node.url}}" typeof="un:GlobalGoal">
<div style="display:none">
    <meta property="schema:name" content="{{node.name}}" />
    <meta property="schema:image" content="{{node.image}}" />
    <meta property="#n" content="{{node.n}}" />
</div>
<h2><a name="{{node.slug_md}}">{{ node.name_numbered }}</a></h2>
<a property="schema:url" href="{{node.url}}">{{node.url}} </a>
<a href="{{node.url}}">
<img src="{{node.image}}" alt="{{node.name_numbered}}"/>{{node.url}} </a>
<div style="margin-left: 12px">
{{ node.tweet_txt }}
</div>
</div>
{% endfor %}
</div>
"""
)
tmpl_rdfa_html5 = jinja2.Template(TMPL_RDFA_HTML5)
output_rdfa_html5 = tmpl_rdfa_html5.render(**context)
print(output_rdfa_html5)
# In[32]:
output_rdfa_html5_path = DATA_DIR / 'globalgoals.rdfa.html5.html'
with codecs.open(output_rdfa_html5_path, 'w', encoding='utf-8') as f:
    f.write(output_rdfa_html5)
print("# wrote goals to %r" % output_rdfa_html5_path)
# In[33]:
IPython.display.HTML(output_rdfa_html5)
# In[34]:
# tmpl_html
# tmpl_rdfa_html5
import difflib
for line in difflib.unified_diff(
TMPL_HTML.splitlines(),
TMPL_RDFA_HTML5.splitlines()):
print(line)
| bsd-3-clause | -4,042,131,204,744,934,400 | 24.630094 | 120 | 0.635763 | false |
GoogleCloudPlatform/healthcare-deid | setup.py | 1 | 1364 | """Setup module for the healthcare_deid DLP pipeline.
All of the code necessary to run the pipeline is packaged into a source
distribution that is uploaded to the --staging_location specified on the command
line. The source distribution is then installed on the workers before they
start running.
When remotely executing the pipeline, `--setup_file path/to/setup.py` must be
added to the pipeline's command line.
"""
import os
import setuptools
# Add required python packages that should be installed over and above the
# standard DataFlow worker environment. Version restrictions are supported if
# necessary.
REQUIRED_PACKAGES = [
'apache_beam[gcp]',
'google-api-python-client',
'google-cloud-storage',
'six==1.10.0',
]
packages = ['common', 'dlp', 'physionet']
package_dir = {p: p for p in packages}
# Use eval from bazel-bin so we get the generated results_pb2.py file.
# If it doesn't exist, then the job is another pipeline that doesn't need eval.
eval_bazel_path = 'bazel-bin/eval/run_pipeline.runfiles/__main__/eval'
if os.path.exists(eval_bazel_path):
packages.append('eval')
package_dir['eval'] = eval_bazel_path
setuptools.setup(
name='healthcare_deid',
version='0.0.1',
package_dir=package_dir,
description='Healthcare Deid pipeline package.',
install_requires=REQUIRED_PACKAGES,
packages=packages)
| apache-2.0 | 6,858,331,243,784,327,000 | 32.268293 | 80 | 0.737537 | false |
SethGreylyn/gwells | gwells/migrations/0009_auto_20170711_1600_squashed_0010_auto_20170713_0917.py | 1 | 20389 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-07-13 17:57
from __future__ import unicode_literals
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
replaces = [('gwells', '0009_auto_20170711_1600'), ('gwells', '0010_auto_20170713_0917')]
dependencies = [
('gwells', '0008_auto_20170707_1158'),
]
operations = [
migrations.RemoveField(
model_name='activitysubmission',
name='created',
),
migrations.RemoveField(
model_name='activitysubmission',
name='modified',
),
migrations.RemoveField(
model_name='ltsaowner',
name='created',
),
migrations.RemoveField(
model_name='ltsaowner',
name='modified',
),
migrations.RemoveField(
model_name='well',
name='created',
),
migrations.RemoveField(
model_name='well',
name='modified',
),
migrations.AddField(
model_name='activitysubmission',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='activitysubmission',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='activitysubmission',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='activitysubmission',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterial',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterial',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterial',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterial',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='bedrockmaterialdescriptor',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='driller',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='driller',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='driller',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='driller',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingcompany',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingcompany',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingcompany',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingcompany',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingmethod',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingmethod',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='drillingmethod',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='drillingmethod',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='groundelevationmethod',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='groundelevationmethod',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='groundelevationmethod',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='groundelevationmethod',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='intendedwateruse',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='intendedwateruse',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='intendedwateruse',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='intendedwateruse',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='landdistrict',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='landdistrict',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='landdistrict',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='landdistrict',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologycolour',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologycolour',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologycolour',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologycolour',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologydescription',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologydescription',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologydescription',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologydescription',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyhardness',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyhardness',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyhardness',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyhardness',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologymoisture',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologymoisture',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologymoisture',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologymoisture',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologystructure',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologystructure',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologystructure',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologystructure',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyweathering',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyweathering',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='lithologyweathering',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='lithologyweathering',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='ltsaowner',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='ltsaowner',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='ltsaowner',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='ltsaowner',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='provincestate',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='provincestate',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='provincestate',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='provincestate',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='surficialmaterial',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='surficialmaterial',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='surficialmaterial',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='surficialmaterial',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='well',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='well',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='well',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='well',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellactivitytype',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellactivitytype',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellactivitytype',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellactivitytype',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellclass',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellclass',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellclass',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellclass',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellsubclass',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellsubclass',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellsubclass',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellsubclass',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellyieldunit',
name='when_created',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellyieldunit',
name='when_updated',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='wellyieldunit',
name='who_created',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AddField(
model_name='wellyieldunit',
name='who_updated',
field=models.CharField(default='', max_length=30),
preserve_default=False,
),
migrations.AlterField(
model_name='activitysubmission',
name='drilling_method',
field=models.ForeignKey(db_column='drilling_method_guid', null=True, on_delete=django.db.models.deletion.CASCADE, to='gwells.DrillingMethod', verbose_name='Drilling Method'),
),
migrations.AlterField(
model_name='activitysubmission',
name='latitude',
field=models.DecimalField(decimal_places=6, max_digits=8, null=True),
),
migrations.AlterField(
model_name='activitysubmission',
name='longitude',
field=models.DecimalField(decimal_places=6, max_digits=9, null=True),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_from',
field=models.DecimalField(decimal_places=2, max_digits=7, validators=[django.core.validators.MinValueValidator(Decimal('0.00'))], verbose_name='From'),
),
migrations.AlterField(
model_name='lithologydescription',
name='lithology_to',
field=models.DecimalField(decimal_places=2, max_digits=7, validators=[django.core.validators.MinValueValidator(Decimal('0.01'))], verbose_name='To'),
),
]
| apache-2.0 | -280,287,383,028,197,200 | 34.45913 | 186 | 0.548286 | false |
dsparrow27/zoocore | zoo/libs/command/commandui.py | 1 | 2760 | from functools import partial
from qt import QtWidgets, QtGui, QtCore
from zoo.libs import iconlib
from zoo.libs.utils import zlogging
logger = zlogging.getLogger(__name__)
class CommandActionBase(QtCore.QObject):
"""CommandUi class deals with encapsulating a command as a widget
"""
triggered = QtCore.Signal(str)
triggeredUi = QtCore.Signal(str)
def __init__(self, command):
super(CommandActionBase, self).__init__()
self.command = command
self.item = None
def create(self, parent=None):
pass
class MenuItem(CommandActionBase):
def create(self, parent=None, optionBox=False):
from maya import cmds
uiData = self.command.uiData
self.item = cmds.menuItem(label=uiData["label"], boldFont=uiData.get("bold", False), parent=parent,
italicized=uiData.get("italicized", False), command=self.emitCommand,
optionBox=optionBox)
if optionBox:
cmds.menuItem(parent=parent, optionBox=optionBox, command=self.emitCommandUi)
return self.item
def emitCommand(self, *args):
"""
        :param args: dummy parameter absorbing the extra arguments Maya
            passes to menuItem commands; unused
:type args: tuple
"""
self.triggered.emit(self.command.id)
def emitCommandUi(self, *args):
"""
        :param args: dummy parameter absorbing the extra arguments Maya
            passes to menuItem commands; unused
:type args: tuple
"""
self.triggeredUi.emit(self.command.id)
class CommandAction(CommandActionBase):
def create(self, parent=None):
uiData = self.command.uiData
self.item = QtWidgets.QWidgetAction(parent)
text = uiData.get("label", "NOLABEL")
actionLabel = QtWidgets.QLabel(text)
self.item.setDefaultWidget(actionLabel)
color = uiData.get("color", "")
backColor = uiData.get("backgroundColor", "")
if color or backColor:
actionLabel.setStyleSheet(
"QLabel {background-color: %s; color: %s;}" % (backColor,
color))
icon = uiData.get("icon")
if icon:
if isinstance(icon, QtGui.QIcon):
self.item.setIcon(icon)
else:
icon = iconlib.icon(icon)
if not icon.isNull():
self.item.setIcon(icon)
self.item.setStatusTip(uiData.get("tooltip"))
self.item.triggered.connect(partial(self.triggered.emit, self.command.id))
logger.debug("Added commandAction, {}".format(text))
return self.item
def show(self):
if self.item is not None:
self.item.show()
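# Example wiring (illustrative sketch; ``my_command`` and ``executor`` are
# hypothetical objects from the surrounding command framework):
#
#   action = CommandAction(my_command)
#   menu.addAction(action.create(parent=menu))
#   action.triggered.connect(executor.executeCommand)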
| gpl-3.0 | -7,807,650,875,466,042,000 | 33.5 | 107 | 0.598913 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/io_blend_utils/bl_utils/subprocess_helper.py | 1 | 5646 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Defines an operator mix-in to use for non-blocking command line access.
"""
class SubprocessHelper:
"""
Mix-in class for operators to run commands in a non-blocking way.
This uses a modal operator to manage an external process.
Subclass must define:
``command``:
List of arguments to pass to subprocess.Popen
    ``report_interval``:
        Time in seconds between updating reports.
``process_pre()``:
Callback that runs before the process executes.
``process_post(returncode)``:
        Callback that runs when the process has ended.
returncode is -1 if the process was terminated.
Subclass may define:
``environment``:
Dict of environment variables exposed to the subprocess.
Contrary to the subprocess.Popen(env=...) parameter, this
        dict is not used to replace the existing environment
        entirely, but just to update it.
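
    Example (a minimal sketch; the operator class, command and interval below
    are hypothetical, not part of this module)::

        class WM_OT_my_tool(SubprocessHelper, bpy.types.Operator):
            bl_idname = "wm.my_tool"
            bl_label = "Run My Tool"

            report_interval = 0.25
            command = ("my_tool", "--verbose")

            def process_post(self, returncode):
                if returncode != 0:
                    self.report({'WARNING'}, "my_tool exited with an error")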
"""
environ = {}
command = ()
@staticmethod
def _non_blocking_readlines(f, chunk=64):
"""
        Iterate over lines, yielding b'' when nothing is left
or when new data is not yet available.
"""
import os
from .pipe_non_blocking import (
pipe_non_blocking_set,
pipe_non_blocking_is_error_blocking,
PortableBlockingIOError,
)
fd = f.fileno()
pipe_non_blocking_set(fd)
blocks = []
while True:
try:
data = os.read(fd, chunk)
if not data:
                    # case where reading finishes with no trailing newline
yield b''.join(blocks)
blocks.clear()
except PortableBlockingIOError as ex:
if not pipe_non_blocking_is_error_blocking(ex):
raise ex
yield b''
continue
while True:
n = data.find(b'\n')
if n == -1:
break
yield b''.join(blocks) + data[:n + 1]
data = data[n + 1:]
blocks.clear()
blocks.append(data)
def _report_output(self):
stdout_line_iter, stderr_line_iter = self._buffer_iter
for line_iter, report_type in (
(stdout_line_iter, {'INFO'}),
(stderr_line_iter, {'WARNING'})
):
while True:
line = next(line_iter).rstrip() # rstrip all, to include \r on windows
if not line:
break
self.report(report_type, line.decode(encoding='utf-8', errors='surrogateescape'))
def _wm_enter(self, context):
wm = context.window_manager
window = context.window
self._timer = wm.event_timer_add(self.report_interval, window)
window.cursor_set('WAIT')
def _wm_exit(self, context):
wm = context.window_manager
window = context.window
wm.event_timer_remove(self._timer)
window.cursor_set('DEFAULT')
def process_pre(self):
pass
def process_post(self, returncode):
pass
def modal(self, context, event):
wm = context.window_manager
p = self._process
if event.type == 'ESC':
self.cancel(context)
self.report({'INFO'}, "Operation aborted by user")
return {'CANCELLED'}
elif event.type == 'TIMER':
if p.poll() is not None:
self._report_output()
self._wm_exit(context)
self.process_post(p.returncode)
return {'FINISHED'}
self._report_output()
return {'PASS_THROUGH'}
def execute(self, context):
import subprocess
import os
import copy
self.process_pre()
env = copy.deepcopy(os.environ)
env.update(self.environ)
try:
p = subprocess.Popen(
self.command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
)
except FileNotFoundError as ex:
# Command not found
self.report({'ERROR'}, str(ex))
return {'CANCELLED'}
self._process = p
self._buffer_iter = (
iter(self._non_blocking_readlines(p.stdout)),
iter(self._non_blocking_readlines(p.stderr)),
)
wm = context.window_manager
wm.modal_handler_add(self)
self._wm_enter(context)
return {'RUNNING_MODAL'}
def cancel(self, context):
self._wm_exit(context)
self._process.kill()
self.process_post(-1)
| gpl-3.0 | -5,265,626,018,868,350,000 | 28.873016 | 97 | 0.553135 | false |
tetra5/radiance | ui/widgets/verticallabel.py | 1 | 1370 | # -*- coding: utf-8 -*-
"""
Created on 28.01.2011
@author: vda
"""
from PyQt4 import QtCore, QtGui
class VerticalLabel(QtGui.QWidget):
def __init__(self, text, parent=None):
        QtGui.QWidget.__init__(self, parent)
self.text = text
fm = QtGui.QApplication.fontMetrics()
self.width = fm.width(self.text)
self.height = fm.height()
# self.setMinimumSize(QtCore.QSize(100, 100))
# self.setMaximumSize(QtCore.QSize(100, 100))
# self.setGeometry(0, 0, 100, 100)
self.setMinimumSize(QtCore.QSize(self.width, self.height))
self.setMaximumSize(QtCore.QSize(self.width, self.height))
self.setGeometry(0, 0, self.width, self.height)
# self.update()
def paintEvent(self, event):
fm = QtGui.QApplication.fontMetrics()
painter = QtGui.QPainter()
painter.begin(self)
painter.setBrush(QtGui.QBrush(QtGui.QColor('#CCCCCC')))
painter.setPen(QtCore.Qt.NoPen)
painter.drawRect(0, 0, fm.height(), fm.width(self.text))
#painter.drawRect(0, 0, 100, 100)
painter.setPen(QtCore.Qt.black)
# painter.translate(20, 100)
painter.rotate(-90)
painter.drawText(event.rect(), QtCore.Qt.AlignCenter, self.text)
painter.end()
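
# Example usage (illustrative only; assumes a running Qt application):
#
#     app = QtGui.QApplication([])
#     label = VerticalLabel('Hello')
#     label.show()
#     app.exec_()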
| mit | 5,865,820,515,146,894,000 | 26.42 | 72 | 0.586861 | false |
supermaik/selbot | Quote_Command.py | 1 | 1175 | from Quotes import Quote
from Command import Command


class Quote_Command(Command):
    def __init__(self, config):
        self.connection = config['connection']
        self.event = config['event']
        self.channel = config['channel']

    def resolve(self):
        args = self.event.arguments[0].split()
        # Don't let people skip last 10 (for voting!)
        if not self.channel.quote_last_ten:
            # Check if they asked for a source
            if len(args) > 1:
                try:
                    # Grab a random quote from given source
                    q = self.channel.quotes_list.random_quote(args[1])
                except Exception:
                    # Invalid source name
                    q = Quote("your_boss", "Don't you think you should be getting back to work?")
            else:
                # Grab random quote from random source
                q = self.channel.quotes_list.random_quote()
            self.channel.last_quote = q
            # Print the quote
            self.respond(self.event.target, q)

    def respond(self, target, message):
        self.connection.privmsg(target, message)
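
# Example wiring (illustrative only; ``conn``, ``event`` and ``chan`` are
# assumed objects supplied by the bot framework):
#
#     cmd = Quote_Command({'connection': conn, 'event': event, 'channel': chan})
#     cmd.resolve()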
| unlicense | 5,750,818,820,420,012,000 | 34.606061 | 97 | 0.556596 | false |
ojengwa/Bookie | bookie/tests/factory.py | 1 | 2651 | """Provide tools for generating objects for testing purposes."""
from datetime import datetime
from random import randint
import random
import string

from bookie.models import DBSession
from bookie.models import Bmark
from bookie.models import Tag
from bookie.models.applog import AppLog
from bookie.models.auth import User
from bookie.models.stats import (
    StatBookmark,
    USER_CT,
)


def random_int(max=1000):
    """Generate a random integer value

    :param max: Maximum value to hit.

    """
    return randint(0, max)


def random_string(length=None):
    """Generate a random string of uppercase letters and digits.

    :param length: Specify the number of chars in the generated string.

    """
    chars = string.ascii_uppercase + string.digits
    str_length = length if length is not None else random_int()
    return unicode(u''.join(random.choice(chars) for x in range(str_length)))


def random_url():
    """Generate a random url that is totally bogus."""
    url = u"http://{0}.com".format(random_string())
    return url


def make_applog(message=None, status=None):
    """Generate applog instances."""
    if status is None:
        status = random_int(max=3)
    if message is None:
        message = random_string(100)

    alog = AppLog(**{
        'user': random_string(10),
        'component': random_string(10),
        'status': status,
        'message': message,
        'payload': u'',
    })
    return alog


def make_tag(name=None):
    if not name:
        name = random_string(255)
    return Tag(name)


def make_bookmark(user=None):
    """Generate a fake bookmark for testing use."""
    bmark = Bmark(random_url(),
                  username=u"admin",
                  desc=random_string(),
                  ext=random_string(),
                  tags=u"bookmarks")

    if user:
        bmark.username = user.username
        bmark.user = user

    DBSession.add(bmark)
    DBSession.flush()
    return bmark


def make_user_bookmark_count(username, data, tstamp=None):
    """Generate a fake user bookmark count for testing use"""
    if tstamp is None:
        tstamp = datetime.utcnow()
    bmark_count = StatBookmark(tstamp=tstamp,
                               attrib=USER_CT.format(username),
                               data=data)
    DBSession.add(bmark_count)
    DBSession.flush()
    return [bmark_count.attrib, bmark_count.data, bmark_count.tstamp]


def make_user(username=None):
    """Generate a fake user to test against."""
    user = User()
    if not username:
        username = random_string(10)
    user.username = username

    DBSession.add(user)
    DBSession.flush()
    return user
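
# Example usage in a test (illustrative only):
#
#     user = make_user(u'tester')
#     bmark = make_bookmark(user=user)
#     assert bmark.username == u'tester'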
| agpl-3.0 | 812,322,954,075,319,000 | 23.775701 | 77 | 0.632214 | false |
Jasonmk47/OpenWPM | automation/Proxy/mitm_commands.py | 1 | 4714 | # This module parses MITM Proxy requests/responses into (command, data) pairs
# This should mean that the MITMProxy code should simply pass the messages + its own data to this module

from urlparse import urlparse
import datetime
import mmh3
import json
import zlib
import os


def encode_to_unicode(msg):
    """
    Tries different encodings before settling on utf8, ignoring any errors.
    We can likely inspect the headers for an encoding as well, though it
    won't always be correct.
    """
    try:
        msg = unicode(msg, 'utf8')
    except UnicodeDecodeError:
        try:
            msg = unicode(msg, 'ISO-8859-1')
        except UnicodeDecodeError:
            msg = unicode(msg, 'utf8', 'ignore')
    return msg


def process_general_mitm_request(db_socket, browser_params, visit_id, msg):
    """ Logs a HTTP request object """
    referrer = msg.request.headers['referer'][0] if len(msg.request.headers['referer']) > 0 else ''

    data = (browser_params['crawl_id'],
            encode_to_unicode(msg.request.url),
            msg.request.method,
            encode_to_unicode(referrer),
            json.dumps(msg.request.headers.get_state()),
            visit_id,
            str(datetime.datetime.now()))

    db_socket.send(("INSERT INTO http_requests (crawl_id, url, method, referrer, headers, "
                    "visit_id, time_stamp) VALUES (?,?,?,?,?,?,?)", data))


def process_general_mitm_response(db_socket, ldb_socket, logger, browser_params, visit_id, msg):
    """ Logs a HTTP response object and, if necessary, archives its javascript content """
    referrer = msg.request.headers['referer'][0] if len(msg.request.headers['referer']) > 0 else ''
    location = msg.response.headers['location'][0] if len(msg.response.headers['location']) > 0 else ''

    content_hash = save_javascript_content(ldb_socket, logger, browser_params, msg)

    data = (browser_params['crawl_id'],
            encode_to_unicode(msg.request.url),
            encode_to_unicode(msg.request.method),
            encode_to_unicode(referrer),
            msg.response.code,
            msg.response.msg,
            json.dumps(msg.response.headers.get_state()),
            encode_to_unicode(location),
            visit_id,
            str(datetime.datetime.now()),
            content_hash)

    db_socket.send(("INSERT INTO http_responses (crawl_id, url, method, referrer, response_status, "
                    "response_status_text, headers, location, visit_id, time_stamp, content_hash) "
                    "VALUES (?,?,?,?,?,?,?,?,?,?,?)", data))


def save_javascript_content(ldb_socket, logger, browser_params, msg):
    """ Save javascript files de-duplicated and compressed on disk """
    if not browser_params['save_javascript']:
        return

    # Check if this response is javascript content
    is_js = False
    if (len(msg.response.headers['Content-Type']) > 0 and
            'javascript' in msg.response.headers['Content-Type'][0]):
        is_js = True
    if not is_js and urlparse(msg.request.url).path.split('.')[-1] == 'js':
        is_js = True
    if not is_js:
        return

    # Decompress any content with compression
    # We want files to hash to the same value
    # Firefox currently only accepts gzip/deflate
    script = ''
    content_encoding = msg.response.headers['Content-Encoding']
    if (len(content_encoding) == 0 or
            content_encoding[0].lower() == 'utf-8' or
            content_encoding[0].lower() == 'identity' or
            content_encoding[0].lower() == 'none' or
            content_encoding[0].lower() == 'ansi_x3.4-1968' or
            content_encoding[0].lower() == 'utf8' or
            content_encoding[0] == ''):
        script = msg.response.content
    elif 'gzip' in content_encoding[0].lower():
        try:
            script = zlib.decompress(msg.response.content, zlib.MAX_WBITS | 16)
        except zlib.error as e:
            logger.error('BROWSER %i: Received zlib error when trying to decompress gzipped javascript: %s' % (browser_params['crawl_id'], str(e)))
            return
    elif 'deflate' in content_encoding[0].lower():
        try:
            script = zlib.decompress(msg.response.content, -zlib.MAX_WBITS)
        except zlib.error as e:
            logger.error('BROWSER %i: Received zlib error when trying to decompress deflated javascript: %s' % (browser_params['crawl_id'], str(e)))
            return
    else:
        logger.error('BROWSER %i: Received Content-Encoding %s. Not supported by Firefox, skipping archive.' % (browser_params['crawl_id'], str(content_encoding)))
        return

    ldb_socket.send(script)

    # Hash script for deduplication on disk
    hasher = mmh3.hash128
    script_hash = str(hasher(script) >> 64)

    return script_hash
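
# Illustrative note (not part of the original module): the deduplication key is
# the top 64 bits of the 128-bit murmur hash of the decompressed script, e.g.
#
#     str(mmh3.hash128(b'var x = 1;') >> 64)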
| gpl-3.0 | 2,147,477,469,874,632,200 | 39.637931 | 163 | 0.627068 | false |
Sharecare/cyclops | app/httpreq.py | 1 | 4917 | import urllib
import urllib2
import urlparse
import socket
import time
import json
import sys

import logging
logger = logging.getLogger(__name__)

import pprint
pp = pprint.PrettyPrinter(indent=4)


# we need to make sure we don't follow redirects so build a new opener
class NoRedirection(urllib2.HTTPErrorProcessor):
    def http_response(self, request, response):
        return response

    https_response = http_response


# by default, urllib2 only deals with GET and POST
# so we subclass it and make it handle other methods
class RequestWithMethod(urllib2.Request):
    def __init__(self, url, method, data=None, headers={}, origin_req_host=None, unverifiable=False):
        self._method = method

        # build up a copy of the full request
        u = urlparse.urlparse(url)
        self._the_request = "%s %s HTTP/1.1\n" % (method, u.path)
        for h in headers:
            self._the_request += "%s: %s\n" % (h, headers[h])
        self._the_request += "\n"
        if data:
            self._the_request += data

        urllib2.Request.__init__(self, url, data, headers, origin_req_host, unverifiable)

    def get_method(self):
        if self._method:
            return self._method
        else:
            return urllib2.Request.get_method(self)


class HTTPReq():
    def __init__(self, timeout=10):
        self.timeout = timeout
        self.AcceptTypes = {}
        self.AcceptTypes['json'] = 'application/json'
        self.AcceptTypes['xml'] = 'application/xml'
        self.AcceptTypes['text'] = 'text/plain'
        self.AcceptTypes['csv'] = 'text/csv'

    def accept2type(self, accept):
        for k in self.AcceptTypes:
            try:
                if self.AcceptTypes[k] == accept:
                    return(k)
            except:
                pass
        return('json')

    def _query(self, req):
        start = end = 0
        code = -1
        rheaders = {}
        ret = None
        retheaders = None

        try:
            opener = urllib2.build_opener(NoRedirection)
        except Exception, e:
            logger.exception(e)
            sys.exit(0)

        try:
            start = time.time()
            response = opener.open(req, timeout=self.timeout)
            end = time.time()
            code = response.code
            retheaders = response.info()
        except urllib2.URLError, e:
            if hasattr(e, 'reason'):
                logger.exception(e)
                ret = str(e.reason)
            else:
                code = e.code
                retheaders = e.info()
                ret = e.read()
            raise e
        except IOError, e:
            if hasattr(e, 'reason'):
                reason = e.reason
            elif hasattr(e, 'code'):
                code = e.code
                rheaders = e.info()
            else:
                logger.exception(e)
                raise e

        try:
            ret = response.read()
        except:
            pass

        try:
            for r in retheaders.items():
                rheaders[r[0].lower()] = r[1]
        except:
            pass

        #return dict(content=ret.decode('ascii', errors='ignore'), status=code, headers=rheaders, speed=(end - start), request=req._the_request)
        return dict(content=ret, status=code, headers=rheaders, speed=(end - start), request=req._the_request)

    def get(self, url, data=None, headers={}, type=None):
        req = None
        try:
            if self.AcceptTypes[type]:
                headers['Accept'] = self.AcceptTypes[type]
                headers['Content-Type'] = self.AcceptTypes[type]
            req = RequestWithMethod(url, 'GET', headers=headers)
        except:
            req = RequestWithMethod(url, 'GET', headers=headers)
        return(self._query(req))

    def post(self, url, data, headers={}, type=None):
        req = None
        try:
            if self.AcceptTypes[type]:
                headers['Accept'] = self.AcceptTypes[type]
                headers['Content-Type'] = self.AcceptTypes[type]
            req = RequestWithMethod(url, 'POST', data=data, headers=headers)
        except Exception, e:
            req = RequestWithMethod(url, 'POST', data=data, headers=headers)
            #logger.exception(e)
        return(self._query(req))

    def delete(self, url, data=None, headers={}, type=None):
        req = None
        try:
            if self.AcceptTypes[type]:
                headers['Accept'] = self.AcceptTypes[type]
                headers['Content-Type'] = self.AcceptTypes[type]
            req = RequestWithMethod(url, 'DELETE', headers=headers)
        except:
            req = RequestWithMethod(url, 'DELETE', headers=headers)
        return(self._query(req))

    def put(self, url, data, headers={}, type=None):
        req = None
        try:
            if self.AcceptTypes[type]:
                headers['Accept'] = self.AcceptTypes[type]
                headers['Content-Type'] = self.AcceptTypes[type]
            req = RequestWithMethod(url, 'PUT', data=data, headers=headers)
        except:
            req = RequestWithMethod(url, 'PUT', data=data, headers=headers)
        return(self._query(req))
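
# Example usage (illustrative only; the URL is a placeholder):
#
#     req = HTTPReq(timeout=5)
#     resp = req.get('http://example.com/api/items', type='json')
#     print resp['status'], resp['headers'].get('content-type'), resp['speed']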
| apache-2.0 | -4,183,367,405,335,792,600 | 28.620482 | 142 | 0.580232 | false |
jiaphuan/models | research/astronet/astronet/astro_model/astro_model.py | 1 | 10261 | # Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""A TensorFlow model for identifying exoplanets in astrophysical light curves.

AstroModel is a concrete base class for models that identify exoplanets in
astrophysical light curves. This class implements a simple linear model that
can be extended by subclasses.

The general framework for AstroModel and its subclasses is as follows:

  * Model inputs:
     - Zero or more time_series_features (e.g. astrophysical light curves)
     - Zero or more aux_features (e.g. orbital period, transit duration)

  * Labels:
     - An integer feature with 2 or more values (eg. 0 = Not Planet, 1 = Planet)

  * Model outputs:
     - The predicted probabilities for each label

  * Architecture:

                                     predictions
                                          ^
                                          |
                                       logits
                                          ^
                                          |
                              (pre_logits_hidden_layers)
                                          ^
                                          |
                                  pre_logits_concat
                                          ^
                                          |
                                    (concatenate)
                               ^                    ^
                               |                    |
              (time_series_hidden_layers)    (aux_hidden_layers)
                               ^                    ^
                               |                    |
                    time_series_features        aux_features

Subclasses will typically override the build_time_series_hidden_layers()
and/or build_aux_hidden_layers() functions. For example, a subclass could
override build_time_series_hidden_layers() to apply convolutional layers to the
time series features. In this class, those functions are simple concatenations
of the input features.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import operator

import tensorflow as tf


class AstroModel(object):
  """A TensorFlow model for classifying astrophysical light curves."""

  def __init__(self, features, labels, hparams, mode):
    """Basic setup. The actual TensorFlow graph is constructed in build().

    Args:
      features: A dictionary containing "time_series_features" and
        "aux_features", each of which is a dictionary of named input Tensors.
        All features have dtype float32 and shape [batch_size, length].
      labels: An int64 Tensor with shape [batch_size]. May be None if mode is
        tf.estimator.ModeKeys.PREDICT.
      hparams: A ConfigDict of hyperparameters for building the model.
      mode: A tf.estimator.ModeKeys to specify whether the graph should be built
        for training, evaluation or prediction.

    Raises:
      ValueError: If mode is invalid.
    """
    valid_modes = [
        tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL,
        tf.estimator.ModeKeys.PREDICT
    ]
    if mode not in valid_modes:
      raise ValueError("Expected mode in %s. Got: %s" % (valid_modes, mode))

    self.hparams = hparams
    self.mode = mode

    # A dictionary of input Tensors. Values have dtype float32 and shape
    # [batch_size, length].
    self.time_series_features = features.get("time_series_features", {})

    # A dictionary of input Tensors. Values have dtype float32 and shape
    # [batch_size, length].
    self.aux_features = features.get("aux_features", {})

    # An int32 Tensor with shape [batch_size]. May be None if mode is
    # tf.estimator.ModeKeys.PREDICT.
    self.labels = labels

    # Optional Tensor; the weights corresponding to self.labels.
    self.weights = features.get("weights")

    # A Python boolean or a scalar boolean Tensor. Indicates whether the model
    # is in training mode for the purpose of graph ops, such as dropout. (Since
    # this might be a Tensor, its value is defined in build()).
    self.is_training = None

    # Global step Tensor.
    self.global_step = None

    # A dictionary of float32 Tensors with shape [batch_size, layer_size]; the
    # outputs of the time series hidden layers.
    self.time_series_hidden_layers = {}

    # A dictionary of float32 Tensors with shape [batch_size, layer_size]; the
    # outputs of the auxiliary hidden layers.
    self.aux_hidden_layers = {}

    # A float32 Tensor with shape [batch_size, layer_size]; the concatenation of
    # outputs from the hidden layers.
    self.pre_logits_concat = None

    # A float32 Tensor with shape [batch_size, output_dim].
    self.logits = None

    # A float32 Tensor with shape [batch_size, output_dim].
    self.predictions = None

    # A float32 Tensor with shape [batch_size]; the cross-entropy losses for the
    # current batch.
    self.batch_losses = None

    # Scalar Tensor; the total loss for the trainer to optimize.
    self.total_loss = None

  def build_time_series_hidden_layers(self):
    """Builds hidden layers for the time series features.

    Inputs:
      self.time_series_features

    Outputs:
      self.time_series_hidden_layers
    """
    # No hidden layers.
    self.time_series_hidden_layers = self.time_series_features

  def build_aux_hidden_layers(self):
    """Builds hidden layers for the auxiliary features.

    Inputs:
      self.aux_features

    Outputs:
      self.aux_hidden_layers
    """
    # No hidden layers.
    self.aux_hidden_layers = self.aux_features

  def build_logits(self):
    """Builds the model logits.

    Inputs:
      self.aux_hidden_layers
      self.time_series_hidden_layers

    Outputs:
      self.pre_logits_concat
      self.logits

    Raises:
      ValueError: If self.time_series_hidden_layers and self.aux_hidden_layers
        are both empty.
    """
    # Sort the hidden layers by name because the order of dictionary items is
    # nondeterministic between invocations of Python.
    time_series_hidden_layers = sorted(
        self.time_series_hidden_layers.items(), key=operator.itemgetter(0))
    aux_hidden_layers = sorted(
        self.aux_hidden_layers.items(), key=operator.itemgetter(0))

    hidden_layers = time_series_hidden_layers + aux_hidden_layers
    if not hidden_layers:
      raise ValueError("At least one time series hidden layer or auxiliary "
                       "hidden layer is required.")

    # Concatenate the hidden layers.
    if len(hidden_layers) == 1:
      pre_logits_concat = hidden_layers[0][1]
    else:
      pre_logits_concat = tf.concat(
          [layer[1] for layer in hidden_layers],
          axis=1,
          name="pre_logits_concat")

    net = pre_logits_concat
    with tf.variable_scope("pre_logits_hidden"):
      for i in range(self.hparams.num_pre_logits_hidden_layers):
        net = tf.layers.dense(
            inputs=net,
            units=self.hparams.pre_logits_hidden_layer_size,
            activation=tf.nn.relu,
            name="fully_connected_%s" % (i + 1))

        if self.hparams.pre_logits_dropout_rate > 0:
          net = tf.layers.dropout(
              net,
              self.hparams.pre_logits_dropout_rate,
              training=self.is_training)

      # Identify the final pre-logits hidden layer as "pre_logits_hidden/final".
      tf.identity(net, "final")

    logits = tf.layers.dense(
        inputs=net, units=self.hparams.output_dim, name="logits")

    self.pre_logits_concat = pre_logits_concat
    self.logits = logits

  def build_predictions(self):
    """Builds the output predictions and losses.

    Inputs:
      self.logits

    Outputs:
      self.predictions
    """
    # Use sigmoid activation function for binary classification, or softmax for
    # multi-class classification.
    prediction_fn = (
        tf.sigmoid if self.hparams.output_dim == 1 else tf.nn.softmax)
    predictions = prediction_fn(self.logits, name="predictions")

    self.predictions = predictions

  def build_losses(self):
    """Builds the training losses.

    Inputs:
      self.logits
      self.labels

    Outputs:
      self.batch_losses
      self.total_loss
    """
    if self.hparams.output_dim == 1:
      # Binary classification.
      batch_losses = tf.nn.sigmoid_cross_entropy_with_logits(
          labels=tf.to_float(self.labels), logits=tf.squeeze(self.logits, [1]))
    else:
      # Multi-class classification.
      batch_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=self.labels, logits=self.logits)

    # Compute the weighted mean cross entropy loss and add it to the LOSSES
    # collection.
    weights = self.weights if self.weights is not None else 1.0
    tf.losses.compute_weighted_loss(
        losses=batch_losses,
        weights=weights,
        reduction=tf.losses.Reduction.MEAN)

    # Compute the total loss, including any other losses added to the LOSSES
    # collection (e.g. regularization losses).
    total_loss = tf.losses.get_total_loss()

    self.batch_losses = batch_losses
    self.total_loss = total_loss

  def build(self):
    """Creates all ops for training, evaluation or inference."""
    self.global_step = tf.train.get_or_create_global_step()

    if self.mode == tf.estimator.ModeKeys.TRAIN:
      # This is implemented as a placeholder Tensor, rather than a constant, to
      # allow its value to be feedable during training (e.g. to disable dropout
      # when performing in-process validation set evaluation).
      self.is_training = tf.placeholder_with_default(True, [], "is_training")
    else:
      self.is_training = False

    self.build_time_series_hidden_layers()
    self.build_aux_hidden_layers()
    self.build_logits()
    self.build_predictions()

    if self.mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
      self.build_losses()
| apache-2.0 | 8,468,219,150,089,580,000 | 32.753289 | 80 | 0.645941 | false |
polyaxon/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/models/v1_k8s_resource_type.py | 1 | 5346 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# coding: utf-8

"""
    Polyaxon SDKs and REST API specification.

    Polyaxon SDKs and REST API specification.  # noqa: E501

    The version of the OpenAPI document: 1.10.0
    Contact: [email protected]
    Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

import six

from polyaxon_sdk.configuration import Configuration


class V1K8sResourceType(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'name': 'str',
        'schema': 'V1K8sResourceSchema',
        'is_requested': 'bool'
    }

    attribute_map = {
        'name': 'name',
        'schema': 'schema',
        'is_requested': 'isRequested'
    }

    def __init__(self, name=None, schema=None, is_requested=None, local_vars_configuration=None):  # noqa: E501
        """V1K8sResourceType - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._name = None
        self._schema = None
        self._is_requested = None
        self.discriminator = None

        if name is not None:
            self.name = name
        if schema is not None:
            self.schema = schema
        if is_requested is not None:
            self.is_requested = is_requested

    @property
    def name(self):
        """Gets the name of this V1K8sResourceType.  # noqa: E501

        :return: The name of this V1K8sResourceType.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this V1K8sResourceType.

        :param name: The name of this V1K8sResourceType.  # noqa: E501
        :type: str
        """

        self._name = name

    @property
    def schema(self):
        """Gets the schema of this V1K8sResourceType.  # noqa: E501

        :return: The schema of this V1K8sResourceType.  # noqa: E501
        :rtype: V1K8sResourceSchema
        """
        return self._schema

    @schema.setter
    def schema(self, schema):
        """Sets the schema of this V1K8sResourceType.

        :param schema: The schema of this V1K8sResourceType.  # noqa: E501
        :type: V1K8sResourceSchema
        """

        self._schema = schema

    @property
    def is_requested(self):
        """Gets the is_requested of this V1K8sResourceType.  # noqa: E501

        :return: The is_requested of this V1K8sResourceType.  # noqa: E501
        :rtype: bool
        """
        return self._is_requested

    @is_requested.setter
    def is_requested(self, is_requested):
        """Sets the is_requested of this V1K8sResourceType.

        :param is_requested: The is_requested of this V1K8sResourceType.  # noqa: E501
        :type: bool
        """

        self._is_requested = is_requested

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1K8sResourceType):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1K8sResourceType):
            return True

        return self.to_dict() != other.to_dict()
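
# Example round-trip (illustrative only):
#
#     rt = V1K8sResourceType(name='my-secret', is_requested=True)
#     rt.to_dict()  # {'name': 'my-secret', 'schema': None, 'is_requested': True}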
| apache-2.0 | 4,836,035,765,302,610,000 | 27.285714 | 111 | 0.588664 | false |
pawelmhm/splash | splash/network_manager.py | 1 | 17016 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import itertools
import functools
from datetime import datetime
import traceback

from PyQt5.QtCore import QByteArray, QTimer
from PyQt5.QtNetwork import (
    QNetworkAccessManager,
    QNetworkProxyQuery,
    QNetworkRequest,
    QNetworkReply
)
from twisted.python import log

from splash.qtutils import qurl2ascii, REQUEST_ERRORS, get_request_webframe
from splash.request_middleware import (
    AdblockMiddleware,
    AllowedDomainsMiddleware,
    AllowedSchemesMiddleware,
    RequestLoggingMiddleware,
    AdblockRulesRegistry,
    ResourceTimeoutMiddleware,
    ResponseBodyTrackingMiddleware,
)
from splash.response_middleware import ContentTypeMiddleware
from splash import defaults
from splash.utils import to_bytes
from splash.cookies import SplashCookieJar


class NetworkManagerFactory(object):
    def __init__(self, filters_path=None, verbosity=None, allowed_schemes=None):
        verbosity = defaults.VERBOSITY if verbosity is None else verbosity
        self.verbosity = verbosity
        self.request_middlewares = []
        self.response_middlewares = []
        self.adblock_rules = None

        # Initialize request and response middlewares
        allowed_schemes = (defaults.ALLOWED_SCHEMES if allowed_schemes is None
                           else allowed_schemes.split(','))
        if allowed_schemes:
            self.request_middlewares.append(
                AllowedSchemesMiddleware(allowed_schemes, verbosity=verbosity)
            )

        if self.verbosity >= 2:
            self.request_middlewares.append(RequestLoggingMiddleware())

        self.request_middlewares.append(AllowedDomainsMiddleware(verbosity=verbosity))
        self.request_middlewares.append(ResourceTimeoutMiddleware())
        self.request_middlewares.append(ResponseBodyTrackingMiddleware())

        if filters_path is not None:
            self.adblock_rules = AdblockRulesRegistry(filters_path, verbosity=verbosity)
            self.request_middlewares.append(
                AdblockMiddleware(self.adblock_rules, verbosity=verbosity)
            )

        self.response_middlewares.append(ContentTypeMiddleware(self.verbosity))

    def __call__(self):
        manager = SplashQNetworkAccessManager(
            request_middlewares=self.request_middlewares,
            response_middlewares=self.response_middlewares,
            verbosity=self.verbosity,
        )
        manager.setCache(None)
        return manager


class ProxiedQNetworkAccessManager(QNetworkAccessManager):
    """
    QNetworkAccessManager subclass with extra features. It

    * Enables "splash proxy factories" support. Qt provides similar
      functionality via setProxyFactory method, but standard
      QNetworkProxyFactory is not flexible enough.
    * Sets up extra logging.
    * Provides a way to get the "source" request (that was made to Splash
      itself).
    * Tracks information about requests/responses and stores it in HAR format,
      including response content.
    * Allows to set per-request timeouts.
    """
    _REQUEST_ID = QNetworkRequest.User + 1
    _SHOULD_TRACK = QNetworkRequest.User + 2

    def __init__(self, verbosity):
        super(ProxiedQNetworkAccessManager, self).__init__()
        self.sslErrors.connect(self._on_ssl_errors)
        self.finished.connect(self._on_finished)
        self.verbosity = verbosity
        self._reply_timeout_timers = {}  # requestId => timer
        self._default_proxy = self.proxy()
        self.cookiejar = SplashCookieJar(self)
        self.setCookieJar(self.cookiejar)
        self._response_bodies = {}  # requestId => response content
        self._request_ids = itertools.count()
        assert self.proxyFactory() is None, "Standard QNetworkProxyFactory is not supported"

    def _on_ssl_errors(self, reply, errors):
        reply.ignoreSslErrors()

    def _on_finished(self, reply):
        reply.deleteLater()

    def createRequest(self, operation, request, outgoingData=None):
        """
        This method is called when a new request is sent;
        it must return a reply object to work with.
        """
        start_time = datetime.utcnow()

        # Proxies are managed per-request, so we're restoring a default
        # before each request. This assumes all requests go through
        # this method.
        self._clear_proxy()

        request, req_id = self._wrap_request(request)
        self._handle_custom_headers(request)
        self._handle_request_cookies(request)

        self._run_webpage_callbacks(request, 'on_request',
                                    request, operation, outgoingData)

        self._handle_custom_proxies(request)
        self._handle_request_response_tracking(request)

        har = self._get_har(request)
        if har is not None:
            har.store_new_request(
                req_id=req_id,
                start_time=start_time,
                operation=operation,
                request=request,
                outgoingData=outgoingData,
            )

        reply = super(ProxiedQNetworkAccessManager, self).createRequest(
            operation, request, outgoingData
        )
        if hasattr(request, 'timeout'):
            timeout = request.timeout * 1000
            if timeout:
                self._set_reply_timeout(reply, timeout)

        if har is not None:
            har.store_new_reply(req_id, reply)

        reply.error.connect(self._on_reply_error)
        reply.finished.connect(self._on_reply_finished)

        if self._should_track_content(request):
            self._response_bodies[req_id] = QByteArray()
            reply.readyRead.connect(self._on_reply_ready_read)

        reply.metaDataChanged.connect(self._on_reply_headers)
        reply.downloadProgress.connect(self._on_reply_download_progress)
        return reply

    def _set_reply_timeout(self, reply, timeout_ms):
        request_id = self._get_request_id(reply.request())
        # reply is used as a parent for the timer in order to destroy
        # the timer when reply is destroyed. It segfaults otherwise.
        timer = QTimer(reply)
        timer.setSingleShot(True)
        timer_callback = functools.partial(self._on_reply_timeout,
                                           reply=reply,
                                           timer=timer,
                                           request_id=request_id)
        timer.timeout.connect(timer_callback)
        self._reply_timeout_timers[request_id] = timer
        timer.start(timeout_ms)

    def _on_reply_timeout(self, reply, timer, request_id):
        self._reply_timeout_timers.pop(request_id)
        self.log("timed out, aborting: {url}", reply, min_level=1)
        # FIXME: set proper error code
        reply.abort()

    def _cancel_reply_timer(self, reply):
        request_id = self._get_request_id(reply.request())
        timer = self._reply_timeout_timers.pop(request_id, None)
        if timer and timer.isActive():
            timer.stop()

    def _clear_proxy(self):
        """ Init default proxy """
        self.setProxy(self._default_proxy)

    def _wrap_request(self, request):
        req = QNetworkRequest(request)
        req_id = next(self._request_ids)
        req.setAttribute(self._REQUEST_ID, req_id)
        for attr in ['timeout', 'track_response_body']:
            if hasattr(request, attr):
                setattr(req, attr, getattr(request, attr))
        return req, req_id

    def _handle_custom_proxies(self, request):
        proxy = None

        # proxies set in proxy profiles or `proxy` HTTP argument
        splash_proxy_factory = self._get_webpage_attribute(request, 'splash_proxy_factory')
        if splash_proxy_factory:
            proxy_query = QNetworkProxyQuery(request.url())
            proxy = splash_proxy_factory.queryProxy(proxy_query)[0]
            self.setProxy(proxy)

        # proxies set in on_request
        if hasattr(request, 'custom_proxy'):
            proxy = request.custom_proxy
            self.setProxy(proxy)

        # Handle proxy auth. We're setting Proxy-Authorization header
        # explicitly because Qt loves to cache proxy credentials.
        if proxy is None:
            return
        user, password = proxy.user(), proxy.password()
        if not user and not password:
            return
        auth = b"Basic " + base64.b64encode("{}:{}".format(user, password).encode("utf-8"))
        request.setRawHeader(b"Proxy-Authorization", auth)

    def _handle_custom_headers(self, request):
        if self._get_webpage_attribute(request, "skip_custom_headers"):
            # XXX: this hack assumes that new requests between
            # BrowserTab._create_request and this function are not possible,
            # i.e. we don't give control to the event loop in between.
            # Unfortunately we can't store this flag on a request itself
            # because a new QNetworkRequest instance is created by QWebKit.
            self._set_webpage_attribute(request, "skip_custom_headers", False)
            return

        headers = self._get_webpage_attribute(request, "custom_headers")
        if isinstance(headers, dict):
            headers = headers.items()

        for name, value in headers or []:
            request.setRawHeader(to_bytes(name), to_bytes(value))

    def _handle_request_cookies(self, request):
        self.cookiejar.update_cookie_header(request)

    def _handle_reply_cookies(self, reply):
        self.cookiejar.fill_from_reply(reply)

    def _handle_request_response_tracking(self, request):
        track = getattr(request, 'track_response_body', False)
        request.setAttribute(self._SHOULD_TRACK, track)

    def _should_track_content(self, request):
        return request.attribute(self._SHOULD_TRACK)

    def _get_request_id(self, request=None):
        if request is None:
            request = self.sender().request()
        return request.attribute(self._REQUEST_ID)

    def _get_har(self, request=None):
        """
        Return HarBuilder instance.

        :rtype: splash.har_builder.HarBuilder | None
        """
        if request is None:
            request = self.sender().request()
        return self._get_webpage_attribute(request, "har")

    def _get_webpage_attribute(self, request, attribute):
        web_frame = get_request_webframe(request)
        if web_frame:
            return getattr(web_frame.page(), attribute, None)

    def _set_webpage_attribute(self, request, attribute, value):
        web_frame = get_request_webframe(request)
        if web_frame:
            return setattr(web_frame.page(), attribute, value)

    def _on_reply_error(self, error_id):
        self._response_bodies.pop(self._get_request_id(), None)

        if error_id != QNetworkReply.OperationCanceledError:
            error_msg = REQUEST_ERRORS.get(error_id, 'unknown error')
            self.log('Download error %d: %s ({url})' % (error_id, error_msg),
                     self.sender(), min_level=2)

    def _on_reply_ready_read(self):
        reply = self.sender()
        self._store_response_chunk(reply)

    def _store_response_chunk(self, reply):
        req_id = self._get_request_id(reply.request())
        if req_id not in self._response_bodies:
            self.log("Internal problem in _store_response_chunk: "
                     "request %s is not tracked" % req_id, reply, min_level=1)
            return
        chunk = reply.peek(reply.bytesAvailable())
        self._response_bodies[req_id].append(chunk)

    def _on_reply_finished(self):
        reply = self.sender()
        request = reply.request()
        self._cancel_reply_timer(reply)

        har = self._get_har()
        har_entry, content = None, None
        if har is not None:
            req_id = self._get_request_id()

            # FIXME: what if har is None? When can it be None?
            # Who removes the content from self._response_bodies dict?
            content = self._response_bodies.pop(req_id, None)
            if content is not None:
                content = bytes(content)

            # FIXME: content is kept in memory at least twice,
            # as raw data and as a base64-encoded copy.
            har.store_reply_finished(req_id, reply, content)
            har_entry = har.get_entry(req_id)

        # We're passing HAR entry to the callbacks because reply object
        # itself doesn't have all information.
        # Content is passed in order to avoid decoding it from base64.
        self._run_webpage_callbacks(request, "on_response", reply, har_entry,
                                    content)
        self.log("Finished downloading {url}", reply)

    def _on_reply_headers(self):
        """Signal emitted before reading response body, after getting headers
        """
        reply = self.sender()
        request = reply.request()
        self._handle_reply_cookies(reply)
        self._run_webpage_callbacks(request, "on_response_headers", reply)

        har = self._get_har()
        if har is not None:
            har.store_reply_headers_received(self._get_request_id(request), reply)

        self.log("Headers received for {url}", reply, min_level=3)

    def _on_reply_download_progress(self, received, total):
        har = self._get_har()
        if har is not None:
            req_id = self._get_request_id()
            har.store_reply_download_progress(req_id, received, total)

        if total == -1:
            total = '?'
        self.log("Downloaded %d/%s of {url}" % (received, total),
                 self.sender(), min_level=4)

    def _on_reply_upload_progress(self, sent, total):
        # FIXME: is it used?
        har = self._get_har()
        if har is not None:
            req_id = self._get_request_id()
            har.store_request_upload_progress(req_id, sent, total)

        if total == -1:
            total = '?'
        self.log("Uploaded %d/%s of {url}" % (sent, total),
                 self.sender(), min_level=4)

    def _get_render_options(self, request):
        return self._get_webpage_attribute(request, 'render_options')

    def _run_webpage_callbacks(self, request, event_name, *args):
        callbacks = self._get_webpage_attribute(request, "callbacks")
        if not callbacks:
            return
        for cb in callbacks.get(event_name, []):
            try:
                cb(*args)
            except:
                # TODO unhandled exceptions in lua callbacks
                # should we raise errors here?
                # https://github.com/scrapinghub/splash/issues/161
                self.log("error in %s callback" % event_name, min_level=1)
                self.log(traceback.format_exc(), min_level=1, format_msg=False)

    def log(self, msg, reply=None, min_level=2, format_msg=True):
        if self.verbosity < min_level:
            return

        if not reply:
            url = ''
        else:
            url = qurl2ascii(reply.url())
            if not url:
                return

        if format_msg:
            msg = msg.format(url=url)
        log.msg(msg, system='network-manager')


class SplashQNetworkAccessManager(ProxiedQNetworkAccessManager):
    """
    This QNetworkAccessManager provides:

    * proxy support;
    * request middleware support;
    * additional logging.

    """
    def __init__(self, request_middlewares, response_middlewares, verbosity):
        super(SplashQNetworkAccessManager, self).__init__(verbosity=verbosity)
        self.request_middlewares = request_middlewares
        self.response_middlewares = response_middlewares

    def run_response_middlewares(self):
        reply = self.sender()
        reply.metaDataChanged.disconnect(self.run_response_middlewares)
        render_options = self._get_render_options(reply.request())
        if render_options:
            try:
                for middleware in self.response_middlewares:
                    middleware.process(reply, render_options)
            except:
                self.log("internal error in response middleware", min_level=1)
                self.log(traceback.format_exc(), min_level=1, format_msg=False)

    def createRequest(self, operation, request, outgoingData=None):
        # XXX: This method MUST return a reply, otherwise PyQT segfaults.
        render_options = self._get_render_options(request)
        if render_options:
            try:
                for middleware in self.request_middlewares:
                    request = middleware.process(request, render_options, operation, outgoingData)
            except:
                self.log("internal error in request middleware", min_level=1)
                self.log(traceback.format_exc(), min_level=1, format_msg=False)

        reply = super(SplashQNetworkAccessManager, self).createRequest(operation, request, outgoingData)
        if render_options:
            reply.metaDataChanged.connect(self.run_response_middlewares)
        return reply
| bsd-3-clause | 1,547,243,669,720,539,400 | 37.497738 | 104 | 0.622297 | false |