# File: Lib/Bio/Blast/NCBIStandalone.py (from zjuchenyuan/BioWeb, MIT license)
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Patches by Mike Poidinger to support multiple databases.
# Updated by Peter Cock in 2007 to do a better job on BLAST 2.2.15
"""Code for calling standalone BLAST and parsing plain text output (DEPRECATED).
Rather than parsing the human readable plain text BLAST output (which seems to
change with every update to BLAST), we and the NCBI recommend you parse the
XML output instead. The plain text parser in this module still works at the
time of writing, but is considered obsolete and updating it to cope with the
latest versions of BLAST is not a priority for us.
This module also provides code to work with the "legacy" standalone version of
NCBI BLAST, tools blastall, rpsblast and blastpgp via three helper functions of
the same name. These functions are very limited for dealing with the output as
files rather than handles, for which the wrappers in Bio.Blast.Applications are
preferred. Furthermore, the NCBI themselves regard these command line tools as
"legacy", and encourage using the new BLAST+ tools instead. Biopython has
wrappers for these under Bio.Blast.Applications (see the tutorial).
"""
from __future__ import print_function
import sys
import re
from Bio._py3k import StringIO
from Bio.ParserSupport import AbstractParser, AbstractConsumer
from Bio.ParserSupport import read_and_call, read_and_call_until
from Bio.ParserSupport import read_and_call_while, attempt_read_and_call
from Bio.ParserSupport import is_blank_line, safe_peekline, safe_readline
from Bio import File
from Bio.Blast import Record
from Bio import BiopythonDeprecationWarning
import warnings
warnings.warn("This module has been deprecated. Consider Bio.SearchIO for "
"parsing BLAST output instead.", BiopythonDeprecationWarning)
_score_e_re = re.compile(r'Score +E')
class LowQualityBlastError(Exception):
"""Error caused by running a low quality sequence through BLAST.
When low quality sequences (like GenBank entries containing only
stretches of a single nucleotide) are BLASTed, they will result in
BLAST generating an error and not being able to perform the BLAST
search. This error should be raised for the BLAST reports produced
in this case.
"""
pass
class ShortQueryBlastError(Exception):
"""Error caused by running a short query sequence through BLAST.
If the query sequence is too short, BLAST outputs warnings and errors::
Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch failed.
[blastall] ERROR: [000.000] AT1G08320: Blast:
[blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at least wordsize
done
This exception is raised when that condition is detected.
"""
pass
class _Scanner(object):
"""Scan BLAST output from blastall or blastpgp.
Tested with blastall and blastpgp v2.0.10, v2.0.11
Methods:
- feed Feed data into the scanner.
"""
def feed(self, handle, consumer):
"""S.feed(handle, consumer)
Feed in a BLAST report for scanning. handle is a file-like
object that contains the BLAST report. consumer is a Consumer
object that will receive events as the report is scanned.
"""
if isinstance(handle, File.UndoHandle):
uhandle = handle
else:
uhandle = File.UndoHandle(handle)
# Try to fast-forward to the beginning of the blast report.
read_and_call_until(uhandle, consumer.noevent, contains='BLAST')
# Now scan the BLAST report.
self._scan_header(uhandle, consumer)
self._scan_rounds(uhandle, consumer)
self._scan_database_report(uhandle, consumer)
self._scan_parameters(uhandle, consumer)
def _scan_header(self, uhandle, consumer):
# BLASTP 2.0.10 [Aug-26-1999]
#
#
# Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Schaf
# Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman (1997),
# "Gapped BLAST and PSI-BLAST: a new generation of protein database sea
# programs", Nucleic Acids Res. 25:3389-3402.
#
# Query= test
# (140 letters)
#
# Database: sdqib40-1.35.seg.fa
# 1323 sequences; 223,339 total letters
#
# ========================================================
# This next example is from the online version of Blast,
# note there are TWO references, an RID line, and also
# the database is BEFORE the query line.
# Note the possible use of non-ASCII in the author names.
# ========================================================
#
# BLASTP 2.2.15 [Oct-15-2006]
# Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Sch??ffer,
# Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman
# (1997), "Gapped BLAST and PSI-BLAST: a new generation of
# protein database search programs", Nucleic Acids Res. 25:3389-3402.
#
# Reference: Sch??ffer, Alejandro A., L. Aravind, Thomas L. Madden, Sergei
# Shavirin, John L. Spouge, Yuri I. Wolf, Eugene V. Koonin, and
# Stephen F. Altschul (2001), "Improving the accuracy of PSI-BLAST
# protein database searches with composition-based statistics
# and other refinements", Nucleic Acids Res. 29:2994-3005.
#
# RID: 1166022616-19998-65316425856.BLASTQ1
#
#
# Database: All non-redundant GenBank CDS
# translations+PDB+SwissProt+PIR+PRF excluding environmental samples
# 4,254,166 sequences; 1,462,033,012 total letters
# Query= gi:16127998
# Length=428
#
consumer.start_header()
read_and_call(uhandle, consumer.version, contains='BLAST')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# There might be a <pre> line, for qblast output.
attempt_read_and_call(uhandle, consumer.noevent, start="<pre>")
# Read the reference(s)
while attempt_read_and_call(uhandle,
consumer.reference, start='Reference'):
# References are normally multiline terminated by a blank line
# (or, based on the old code, the RID line)
while True:
line = uhandle.readline()
if is_blank_line(line):
consumer.noevent(line)
break
elif line.startswith("RID"):
break
else:
# More of the reference
consumer.reference(line)
# Deal with the optional RID: ...
read_and_call_while(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.reference, start="RID:")
read_and_call_while(uhandle, consumer.noevent, blank=1)
# blastpgp may have a reference for compositional score matrix
# adjustment (see Bug 2502):
if attempt_read_and_call(
uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# blastpgp has a Reference for composition-based statistics.
if attempt_read_and_call(
uhandle, consumer.reference, start="Reference"):
read_and_call_until(uhandle, consumer.reference, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
line = uhandle.peekline()
assert line.strip() != ""
assert not line.startswith("RID:")
if line.startswith("Query="):
# This is an old style query then database...
# Read the Query lines and the following blank line.
read_and_call(uhandle, consumer.query_info, start='Query=')
read_and_call_until(uhandle, consumer.query_info, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Read the database lines and the following blank line.
read_and_call_until(uhandle, consumer.database_info, end='total letters')
read_and_call(uhandle, consumer.database_info, contains='sequences')
read_and_call_while(uhandle, consumer.noevent, blank=1)
elif line.startswith("Database:"):
# This is a new style database then query...
read_and_call_until(uhandle, consumer.database_info, end='total letters')
read_and_call(uhandle, consumer.database_info, contains='sequences')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Read the Query lines and the following blank line.
# Or, on BLAST 2.2.22+ there is no blank line - need to spot
# the "... Score E" line instead.
read_and_call(uhandle, consumer.query_info, start='Query=')
# BLAST 2.2.25+ has a blank line before Length=
read_and_call_until(uhandle, consumer.query_info, start='Length=')
while True:
line = uhandle.peekline()
if not line.strip() or _score_e_re.search(line) is not None:
break
# It is more of the query (and its length)
read_and_call(uhandle, consumer.query_info)
read_and_call_while(uhandle, consumer.noevent, blank=1)
else:
raise ValueError("Invalid header?")
consumer.end_header()
def _scan_rounds(self, uhandle, consumer):
# Scan a bunch of rounds.
# Each round begins with either a "Searching......" line
# or a 'Score E' line followed by descriptions and alignments.
# The email server doesn't give the "Searching....." line.
# If there is no 'Searching.....' line then you'll first see a
# 'Results from round' line
while not self._eof(uhandle):
line = safe_peekline(uhandle)
if not line.startswith('Searching') and \
not line.startswith('Results from round') and \
_score_e_re.search(line) is None and \
'No hits found' not in line:
break
self._scan_descriptions(uhandle, consumer)
self._scan_alignments(uhandle, consumer)
def _scan_descriptions(self, uhandle, consumer):
# Searching..................................................done
# Results from round 2
#
#
# Sc
# Sequences producing significant alignments: (b
# Sequences used in model and found again:
#
# d1tde_2 3.4.1.4.4 (119-244) Thioredoxin reductase [Escherichia ...
# d1tcob_ 1.31.1.5.16 Calcineurin regulatory subunit (B-chain) [B...
# d1symb_ 1.31.1.2.2 Calcyclin (S100) [RAT (RATTUS NORVEGICUS)]
#
# Sequences not found previously or not previously below threshold:
#
# d1osa__ 1.31.1.5.11 Calmodulin [Paramecium tetraurelia]
# d1aoza3 2.5.1.3.3 (339-552) Ascorbate oxidase [zucchini (Cucurb...
#
# If PSI-BLAST, may also have:
#
# CONVERGED!
consumer.start_descriptions()
# Read 'Searching'
# This line seems to be missing in BLASTN 2.1.2 (others?)
attempt_read_and_call(uhandle, consumer.noevent, start='Searching')
# blastpgp 2.0.10 from NCBI 9/19/99 for Solaris sometimes crashes here.
# If this happens, the handle will yield no more information.
if not uhandle.peekline():
raise ValueError("Unexpected end of blast report. " +
"Looks suspiciously like a PSI-BLAST crash.")
# BLASTN 2.2.3 sometimes spews a bunch of warnings and errors here:
# Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch
# [blastall] ERROR: [000.000] AT1G08320: Blast:
# [blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at leas
# done
# Reported by David Weisman.
# Check for these error lines and ignore them for now. Let
# the BlastErrorParser deal with them.
line = uhandle.peekline()
if "ERROR:" in line or line.startswith("done"):
read_and_call_while(uhandle, consumer.noevent, contains="ERROR:")
read_and_call(uhandle, consumer.noevent, start="done")
# Check to see if this is PSI-BLAST.
# If it is, the 'Searching' line will be followed by:
# (version 2.0.10)
# Searching.............................
# Results from round 2
# or (version 2.0.11)
# Searching.............................
#
#
# Results from round 2
# Skip a bunch of blank lines.
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Check for the results line if it's there.
if attempt_read_and_call(uhandle, consumer.round, start='Results'):
read_and_call_while(uhandle, consumer.noevent, blank=1)
# Three things can happen here:
# 1. line contains 'Score E'
# 2. line contains "No hits found"
# 3. no descriptions
# The first one begins a bunch of descriptions. The last two
# indicate that no descriptions follow, and we should go straight
# to the alignments.
if not attempt_read_and_call(
uhandle, consumer.description_header,
has_re=_score_e_re):
# Either case 2 or 3. Look for "No hits found".
attempt_read_and_call(uhandle, consumer.no_hits,
contains='No hits found')
try:
read_and_call_while(uhandle, consumer.noevent, blank=1)
except ValueError as err:
if str(err) != "Unexpected end of stream.":
raise err
consumer.end_descriptions()
# Stop processing.
return
# Read the score header lines
read_and_call(uhandle, consumer.description_header,
start='Sequences producing')
# If PSI-BLAST, read the 'Sequences used in model' line.
attempt_read_and_call(uhandle, consumer.model_sequences,
start='Sequences used in model')
read_and_call_while(uhandle, consumer.noevent, blank=1)
# In BLAT, rather than a "No hits found" line, we just
# get no descriptions (and no alignments). This can be
# spotted because the next line is the database block:
if safe_peekline(uhandle).startswith(" Database:"):
consumer.end_descriptions()
# Stop processing.
return
# Read the descriptions and the following blank lines, making
# sure that there are descriptions.
if not uhandle.peekline().startswith('Sequences not found'):
read_and_call_until(uhandle, consumer.description, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
# If PSI-BLAST, read the 'Sequences not found' line followed
# by more descriptions. However, I need to watch out for the
# case where there were no sequences not found previously, in
# which case there will be no more descriptions.
if attempt_read_and_call(uhandle, consumer.nonmodel_sequences,
start='Sequences not found'):
# Read the descriptions and the following blank lines.
read_and_call_while(uhandle, consumer.noevent, blank=1)
line = safe_peekline(uhandle)
# Brad -- added check for QUERY. On some PSI-BLAST outputs
# there will be a 'Sequences not found' line followed by no
# descriptions. Check for this case since the first thing you'll
# get is a blank line and then 'QUERY'
if not line.startswith('CONVERGED') and line[0] != '>' \
and not line.startswith('QUERY'):
read_and_call_until(uhandle, consumer.description, blank=1)
read_and_call_while(uhandle, consumer.noevent, blank=1)
attempt_read_and_call(uhandle, consumer.converged, start='CONVERGED')
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_descriptions()
def _scan_alignments(self, uhandle, consumer):
if self._eof(uhandle):
return
# qblast inserts a helpful line here.
attempt_read_and_call(uhandle, consumer.noevent, start="ALIGNMENTS")
# First, check to see if I'm at the database report.
line = safe_peekline(uhandle)
if not line:
# EOF
return
elif line.startswith(' Database') or line.startswith("Lambda"):
return
elif line[0] == '>':
# XXX make a better check here between pairwise and masterslave
self._scan_pairwise_alignments(uhandle, consumer)
elif line.startswith('Effective'):
return
else:
# XXX put in a check to make sure I'm in a masterslave alignment
self._scan_masterslave_alignment(uhandle, consumer)
def _scan_pairwise_alignments(self, uhandle, consumer):
while not self._eof(uhandle):
line = safe_peekline(uhandle)
if line[0] != '>':
break
self._scan_one_pairwise_alignment(uhandle, consumer)
def _scan_one_pairwise_alignment(self, uhandle, consumer):
if self._eof(uhandle):
return
consumer.start_alignment()
self._scan_alignment_header(uhandle, consumer)
# Scan a bunch of score/alignment pairs.
while True:
if self._eof(uhandle):
# Shouldn't have issued that _scan_alignment_header event...
break
line = safe_peekline(uhandle)
if not line.startswith(' Score'):
break
self._scan_hsp(uhandle, consumer)
consumer.end_alignment()
def _scan_alignment_header(self, uhandle, consumer):
# >d1rip__ 2.24.7.1.1 Ribosomal S17 protein [Bacillus
# stearothermophilus]
# Length = 81
#
# Or, more recently with different white space:
#
# >gi|15799684|ref|NP_285696.1| threonine synthase ...
# gi|15829258|ref|NP_308031.1| threonine synthase
# ...
# Length=428
read_and_call(uhandle, consumer.title, start='>')
while True:
line = safe_readline(uhandle)
if line.lstrip().startswith(('Length =', 'Length=')):
consumer.length(line)
break
elif is_blank_line(line):
# Check to make sure I haven't missed the Length line
raise ValueError("I missed the Length in an alignment header")
consumer.title(line)
# Older versions of BLAST will have a line with some spaces.
# Version 2.0.14 (maybe 2.0.13?) and above print a true blank line.
if not attempt_read_and_call(uhandle, consumer.noevent,
start=' '):
read_and_call(uhandle, consumer.noevent, blank=1)
def _scan_hsp(self, uhandle, consumer):
consumer.start_hsp()
self._scan_hsp_header(uhandle, consumer)
self._scan_hsp_alignment(uhandle, consumer)
consumer.end_hsp()
def _scan_hsp_header(self, uhandle, consumer):
# Score = 22.7 bits (47), Expect = 2.5
# Identities = 10/36 (27%), Positives = 18/36 (49%)
# Strand = Plus / Plus
# Frame = +3
#
read_and_call(uhandle, consumer.score, start=' Score')
read_and_call(uhandle, consumer.identities, start=' Identities')
# BLASTN
attempt_read_and_call(uhandle, consumer.strand, start=' Strand')
# BLASTX, TBLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.frame, start=' Frame')
read_and_call(uhandle, consumer.noevent, blank=1)
def _scan_hsp_alignment(self, uhandle, consumer):
# Query: 11 GRGVSACA-------TCDGFFYRNQKVAVIGGGNTAVEEALYLSNIASEVHLIHRRDGF
# GRGVS+ TC Y + + V GGG+ + EE L + I R+
# Sbjct: 12 GRGVSSVVRRCIHKPTCKE--YAVKIIDVTGGGSFSAEEVQELREATLKEVDILRKVSG
#
# Query: 64 AEKILIKR 71
# I +K
# Sbjct: 70 PNIIQLKD 77
#
while True:
# Blastn adds an extra line filled with spaces before Query
attempt_read_and_call(uhandle, consumer.noevent, start=' ')
read_and_call(uhandle, consumer.query, start='Query')
read_and_call(uhandle, consumer.align, start=' ')
read_and_call(uhandle, consumer.sbjct, start='Sbjct')
try:
read_and_call_while(uhandle, consumer.noevent, blank=1)
except ValueError as err:
if str(err) != "Unexpected end of stream.":
raise err
# End of File (well, it looks like it with recent versions
# of BLAST for multiple queries after the Iterator class
# has broken up the whole file into chunks).
break
line = safe_peekline(uhandle)
# Alignment continues if I see a 'Query' or the spaces for Blastn.
if not (line.startswith('Query') or line.startswith(' ')):
break
def _scan_masterslave_alignment(self, uhandle, consumer):
consumer.start_alignment()
while True:
line = safe_readline(uhandle)
# Check to see whether I'm finished reading the alignment.
# This is indicated by 1) database section, 2) next psi-blast
# round, which can also be a 'Results from round' if no
# searching line is present
# patch by chapmanb
if line.startswith('Searching') or \
line.startswith('Results from round'):
uhandle.saveline(line)
break
elif line.startswith(' Database'):
uhandle.saveline(line)
break
elif is_blank_line(line):
consumer.noevent(line)
else:
consumer.multalign(line)
read_and_call_while(uhandle, consumer.noevent, blank=1)
consumer.end_alignment()
def _eof(self, uhandle):
try:
line = safe_peekline(uhandle)
except ValueError as err:
if str(err) != "Unexpected end of stream.":
raise err
line = ""
return not line
def _scan_database_report(self, uhandle, consumer):
# Database: sdqib40-1.35.seg.fa
# Posted date: Nov 1, 1999 4:25 PM
# Number of letters in database: 223,339
# Number of sequences in database: 1323
#
# Lambda K H
# 0.322 0.133 0.369
#
# Gapped
# Lambda K H
# 0.270 0.0470 0.230
#
##########################################
# Or, more recently Blast 2.2.15 gives less blank lines
##########################################
# Database: All non-redundant GenBank CDS translations+PDB+SwissProt+PIR+PRF excluding
# environmental samples
# Posted date: Dec 12, 2006 5:51 PM
# Number of letters in database: 667,088,753
# Number of sequences in database: 2,094,974
# Lambda K H
# 0.319 0.136 0.395
# Gapped
# Lambda K H
# 0.267 0.0410 0.140
if self._eof(uhandle):
return
consumer.start_database_report()
# Subset of the database(s) listed below
# Number of letters searched: 562,618,960
# Number of sequences searched: 228,924
if attempt_read_and_call(uhandle, consumer.noevent, start=" Subset"):
read_and_call(uhandle, consumer.noevent, contains="letters")
read_and_call(uhandle, consumer.noevent, contains="sequences")
read_and_call(uhandle, consumer.noevent, start=" ")
# Sameet Mehta reported seeing output from BLASTN 2.2.9 that
# was missing the "Database" stanza completely.
while attempt_read_and_call(uhandle, consumer.database,
start=' Database'):
# BLAT output ends abruptly here, without any of the other
# information. Check to see if this is the case. If so,
# then end the database report here gracefully.
if not uhandle.peekline().strip() or \
uhandle.peekline().startswith("BLAST"):
consumer.end_database_report()
return
# Database can span multiple lines.
read_and_call_until(uhandle, consumer.database, start=' Posted')
read_and_call(uhandle, consumer.posted_date, start=' Posted')
read_and_call(uhandle, consumer.num_letters_in_database,
start=' Number of letters')
read_and_call(uhandle, consumer.num_sequences_in_database,
start=' Number of sequences')
# There may not be a line starting with spaces...
attempt_read_and_call(uhandle, consumer.noevent, start=' ')
line = safe_readline(uhandle)
uhandle.saveline(line)
if 'Lambda' in line:
break
try:
read_and_call(uhandle, consumer.noevent, start='Lambda')
read_and_call(uhandle, consumer.ka_params)
except Exception: # TODO: ValueError, AttributeError?
pass
# This blank line is optional:
attempt_read_and_call(uhandle, consumer.noevent, blank=1)
# not BLASTP
attempt_read_and_call(uhandle, consumer.gapped, start='Gapped')
# not TBLASTX
if attempt_read_and_call(uhandle, consumer.noevent, start='Lambda'):
read_and_call(uhandle, consumer.ka_params_gap)
# Blast 2.2.4 can sometimes skip the whole parameter section.
# Thus, I need to be careful not to read past the end of the
# file.
try:
read_and_call_while(uhandle, consumer.noevent, blank=1)
except ValueError as x:
if str(x) != "Unexpected end of stream.":
raise
consumer.end_database_report()
def _scan_parameters(self, uhandle, consumer):
# Matrix: BLOSUM62
# Gap Penalties: Existence: 11, Extension: 1
# Number of Hits to DB: 50604
# Number of Sequences: 1323
# Number of extensions: 1526
# Number of successful extensions: 6
# Number of sequences better than 10.0: 5
# Number of HSP's better than 10.0 without gapping: 5
# Number of HSP's successfully gapped in prelim test: 0
# Number of HSP's that attempted gapping in prelim test: 1
# Number of HSP's gapped (non-prelim): 5
# length of query: 140
# length of database: 223,339
# effective HSP length: 39
# effective length of query: 101
# effective length of database: 171,742
# effective search space: 17345942
# effective search space used: 17345942
# T: 11
# A: 40
# X1: 16 ( 7.4 bits)
# X2: 38 (14.8 bits)
# X3: 64 (24.9 bits)
# S1: 41 (21.9 bits)
# S2: 42 (20.8 bits)
##########################################
# Or, more recently Blast(x) 2.2.15 gives
##########################################
# Matrix: BLOSUM62
# Gap Penalties: Existence: 11, Extension: 1
# Number of Sequences: 4535438
# Number of Hits to DB: 2,588,844,100
# Number of extensions: 60427286
# Number of successful extensions: 126433
# Number of sequences better than 2.0: 30
# Number of HSP's gapped: 126387
# Number of HSP's successfully gapped: 35
# Length of query: 291
# Length of database: 1,573,298,872
# Length adjustment: 130
# Effective length of query: 161
# Effective length of database: 983,691,932
# Effective search space: 158374401052
# Effective search space used: 158374401052
# Neighboring words threshold: 12
# Window for multiple hits: 40
# X1: 16 ( 7.3 bits)
# X2: 38 (14.6 bits)
# X3: 64 (24.7 bits)
# S1: 41 (21.7 bits)
# S2: 32 (16.9 bits)
# Blast 2.2.4 can sometimes skip the whole parameter section.
# BLAT also skips the whole parameter section.
# Thus, check to make sure that the parameter section really
# exists.
if not uhandle.peekline().strip():
return
# BLASTN 2.2.9 looks like it reverses the "Number of Hits" and
# "Number of Sequences" lines.
consumer.start_parameters()
# Matrix line may be missing in BLASTN 2.2.9
attempt_read_and_call(uhandle, consumer.matrix, start='Matrix')
# not TBLASTX
attempt_read_and_call(uhandle, consumer.gap_penalties, start='Gap')
attempt_read_and_call(uhandle, consumer.num_sequences,
start='Number of Sequences')
attempt_read_and_call(uhandle, consumer.num_hits,
start='Number of Hits')
attempt_read_and_call(uhandle, consumer.num_sequences,
start='Number of Sequences')
attempt_read_and_call(uhandle, consumer.num_extends,
start='Number of extensions')
attempt_read_and_call(uhandle, consumer.num_good_extends,
start='Number of successful')
attempt_read_and_call(uhandle, consumer.num_seqs_better_e,
start='Number of sequences')
# not BLASTN, TBLASTX
if attempt_read_and_call(uhandle, consumer.hsps_no_gap,
start="Number of HSP's better"):
# BLASTN 2.2.9
if attempt_read_and_call(uhandle, consumer.noevent,
start="Number of HSP's gapped:"):
read_and_call(uhandle, consumer.noevent,
start="Number of HSP's successfully")
# This is omitted in 2.2.15
attempt_read_and_call(uhandle, consumer.noevent,
start="Number of extra gapped extensions")
else:
read_and_call(uhandle, consumer.hsps_prelim_gapped,
start="Number of HSP's successfully")
read_and_call(uhandle, consumer.hsps_prelim_gap_attempted,
start="Number of HSP's that")
read_and_call(uhandle, consumer.hsps_gapped,
start="Number of HSP's gapped")
# e.g. BLASTX 2.2.15 where the "better" line is missing
elif attempt_read_and_call(uhandle, consumer.noevent,
start="Number of HSP's gapped"):
read_and_call(uhandle, consumer.noevent,
start="Number of HSP's successfully")
# not in blastx 2.2.1
attempt_read_and_call(uhandle, consumer.query_length,
has_re=re.compile(r"[Ll]ength of query"))
# Not in BLASTX 2.2.22+
attempt_read_and_call(uhandle, consumer.database_length,
has_re=re.compile(r"[Ll]ength of \s*[Dd]atabase"))
# BLASTN 2.2.9
attempt_read_and_call(uhandle, consumer.noevent,
start="Length adjustment")
attempt_read_and_call(uhandle, consumer.effective_hsp_length,
start='effective HSP')
# Not in blastx 2.2.1
attempt_read_and_call(
uhandle, consumer.effective_query_length,
has_re=re.compile(r'[Ee]ffective length of query'))
# This is not in BLASTP 2.2.15
attempt_read_and_call(
uhandle, consumer.effective_database_length,
has_re=re.compile(r'[Ee]ffective length of \s*[Dd]atabase'))
# Not in blastx 2.2.1, added a ':' to distinguish between
# this and the 'effective search space used' line
attempt_read_and_call(
uhandle, consumer.effective_search_space,
has_re=re.compile(r'[Ee]ffective search space:'))
# Does not appear in BLASTP 2.0.5
attempt_read_and_call(
uhandle, consumer.effective_search_space_used,
has_re=re.compile(r'[Ee]ffective search space used'))
# BLASTX, TBLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.frameshift, start='frameshift')
# not in BLASTN 2.2.9
attempt_read_and_call(uhandle, consumer.threshold, start='T')
# In BLASTX 2.2.15 replaced by: "Neighboring words threshold: 12"
attempt_read_and_call(uhandle, consumer.threshold, start='Neighboring words threshold')
# not in BLASTX 2.2.15
attempt_read_and_call(uhandle, consumer.window_size, start='A')
# get this instead: "Window for multiple hits: 40"
attempt_read_and_call(uhandle, consumer.window_size, start='Window for multiple hits')
# not in BLASTX 2.2.22+
attempt_read_and_call(uhandle, consumer.dropoff_1st_pass, start='X1')
# not TBLASTN
attempt_read_and_call(uhandle, consumer.gap_x_dropoff, start='X2')
# not BLASTN, TBLASTX
attempt_read_and_call(uhandle, consumer.gap_x_dropoff_final,
start='X3')
# not TBLASTN
attempt_read_and_call(uhandle, consumer.gap_trigger, start='S1')
# not in blastx 2.2.1
# first we make sure we have additional lines to work with, if
# not then the file is done and we don't have a final S2
if not is_blank_line(uhandle.peekline(), allow_spaces=1):
read_and_call(uhandle, consumer.blast_cutoff, start='S2')
consumer.end_parameters()
class BlastParser(AbstractParser):
"""Parses BLAST data into a Record.Blast object.
"""
def __init__(self):
"""__init__(self)"""
self._scanner = _Scanner()
self._consumer = _BlastConsumer()
def parse(self, handle):
"""parse(self, handle)"""
self._scanner.feed(handle, self._consumer)
return self._consumer.data
class PSIBlastParser(AbstractParser):
"""Parses BLAST data into a Record.PSIBlast object.
"""
def __init__(self):
"""__init__(self)"""
self._scanner = _Scanner()
self._consumer = _PSIBlastConsumer()
def parse(self, handle):
"""parse(self, handle)"""
self._scanner.feed(handle, self._consumer)
return self._consumer.data
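# A usage sketch for PSI-BLAST output (illustrative; "my_psiblast.txt"
# is a hypothetical blastpgp plain text report):
#
#     parser = PSIBlastParser()
#     with open("my_psiblast.txt") as handle:
#         record = parser.parse(handle)
#     for round_ in record.rounds:
#         print(round_.number, len(round_.alignments))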
class _HeaderConsumer(object):
def start_header(self):
self._header = Record.Header()
def version(self, line):
c = line.split()
self._header.application = c[0]
self._header.version = c[1]
if len(c) > 2:
# The date is missing in the new C++ output from blastx 2.2.22+
# Just get "BLASTX 2.2.22+\n" and that's all.
self._header.date = c[2][1:-1]
def reference(self, line):
if line.startswith('Reference: '):
self._header.reference = line[11:]
else:
self._header.reference = self._header.reference + line
def query_info(self, line):
if line.startswith('Query= '):
self._header.query = line[7:].lstrip()
elif line.startswith('Length='):
# New style way to give the query length in BLAST 2.2.22+ (the C++ code)
self._header.query_letters = _safe_int(line[7:].strip())
elif not line.startswith(' '): # continuation of query_info
self._header.query = "%s%s" % (self._header.query, line)
else:
# Hope it is the old style way to give the query length:
letters, = _re_search(
r"([0-9,]+) letters", line,
"I could not find the number of letters in line\n%s" % line)
self._header.query_letters = _safe_int(letters)
def database_info(self, line):
line = line.rstrip()
if line.startswith('Database: '):
self._header.database = line[10:]
elif not line.endswith('total letters'):
if self._header.database:
# Need to include a space when merging a multi-line database description
self._header.database = self._header.database + " " + line.strip()
else:
self._header.database = line.strip()
else:
sequences, letters = _re_search(
r"([0-9,]+) sequences; ([0-9,-]+) total letters", line,
"I could not find the sequences and letters in line\n%s" % line)
self._header.database_sequences = _safe_int(sequences)
self._header.database_letters = _safe_int(letters)
def end_header(self):
# Get rid of the trailing newlines
self._header.reference = self._header.reference.rstrip()
self._header.query = self._header.query.rstrip()
class _DescriptionConsumer(object):
def start_descriptions(self):
self._descriptions = []
self._model_sequences = []
self._nonmodel_sequences = []
self._converged = 0
self._type = None
self._roundnum = None
self.__has_n = 0 # Does the description line contain an N value?
def description_header(self, line):
if line.startswith('Sequences producing'):
cols = line.split()
if cols[-1] == 'N':
self.__has_n = 1
def description(self, line):
dh = self._parse(line)
if self._type == 'model':
self._model_sequences.append(dh)
elif self._type == 'nonmodel':
self._nonmodel_sequences.append(dh)
else:
self._descriptions.append(dh)
def model_sequences(self, line):
self._type = 'model'
def nonmodel_sequences(self, line):
self._type = 'nonmodel'
def converged(self, line):
self._converged = 1
def no_hits(self, line):
pass
def round(self, line):
if not line.startswith('Results from round'):
raise ValueError("I didn't understand the round line\n%s" % line)
self._roundnum = _safe_int(line[18:].strip())
def end_descriptions(self):
pass
def _parse(self, description_line):
line = description_line # for convenience
dh = Record.Description()
# I need to separate the score and p-value from the title.
# sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77
# sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77 1
# special cases to handle:
# - title must be preserved exactly (including whitespaces)
# - score could be equal to e-value (not likely, but what if??)
# - sometimes there's an "N" score of '1'.
cols = line.split()
if len(cols) < 3:
raise ValueError(
"Line does not appear to contain description:\n%s" % line)
if self.__has_n:
i = line.rfind(cols[-1]) # find start of N
i = line.rfind(cols[-2], 0, i) # find start of p-value
i = line.rfind(cols[-3], 0, i) # find start of score
else:
i = line.rfind(cols[-1]) # find start of p-value
i = line.rfind(cols[-2], 0, i) # find start of score
if self.__has_n:
dh.title, dh.score, dh.e, dh.num_alignments = \
line[:i].rstrip(), cols[-3], cols[-2], cols[-1]
else:
dh.title, dh.score, dh.e, dh.num_alignments = \
line[:i].rstrip(), cols[-2], cols[-1], 1
dh.num_alignments = _safe_int(dh.num_alignments)
dh.score = _safe_int(dh.score)
dh.e = _safe_float(dh.e)
return dh
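# Worked example (using the sample line from the comment above): for
#     "sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip]    284  7e-77"
# _parse returns dh with dh.title == "sp|P21297|FLBT_CAUCR FLBT PROTEIN
# [snip]", dh.score == 284, dh.e == 7e-77 and dh.num_alignments == 1.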
class _AlignmentConsumer(object):
# This is a little bit tricky. An alignment can either be a
# pairwise alignment or a multiple alignment. Since it's difficult
# to know a priori which one the blast record will contain, I'm going
# to make one class that can parse both of them.
def start_alignment(self):
self._alignment = Record.Alignment()
self._multiple_alignment = Record.MultipleAlignment()
def title(self, line):
if self._alignment.title:
self._alignment.title += " "
self._alignment.title += line.strip()
def length(self, line):
# e.g. "Length = 81" or more recently, "Length=428"
parts = line.replace(" ", "").split("=")
assert len(parts) == 2, "Unrecognised format length line"
self._alignment.length = parts[1]
self._alignment.length = _safe_int(self._alignment.length)
def multalign(self, line):
# Standalone version uses 'QUERY', while WWW version uses blast_tmp.
if line.startswith('QUERY') or line.startswith('blast_tmp'):
# If this is the first line of the multiple alignment,
# then I need to figure out how the line is formatted.
# Format of line is:
# QUERY 1 acttg...gccagaggtggtttattcagtctccataagagaggggacaaacg 60
try:
name, start, seq, end = line.split()
except ValueError:
raise ValueError("I do not understand the line\n%s" % line)
self._start_index = line.index(start, len(name))
self._seq_index = line.index(seq,
self._start_index + len(start))
# subtract 1 for the space
self._name_length = self._start_index - 1
self._start_length = self._seq_index - self._start_index - 1
self._seq_length = line.rfind(end) - self._seq_index - 1
# self._seq_index = line.index(seq)
# # subtract 1 for the space
# self._seq_length = line.rfind(end) - self._seq_index - 1
# self._start_index = line.index(start)
# self._start_length = self._seq_index - self._start_index - 1
# self._name_length = self._start_index
# Extract the information from the line
name = line[:self._name_length]
name = name.rstrip()
start = line[self._start_index:self._start_index + self._start_length]
start = start.rstrip()
if start:
start = _safe_int(start)
end = line[self._seq_index + self._seq_length:].rstrip()
if end:
end = _safe_int(end)
seq = line[self._seq_index:self._seq_index + self._seq_length].rstrip()
# right pad the sequence with spaces if necessary
if len(seq) < self._seq_length:
seq += ' ' * (self._seq_length - len(seq))
# I need to make sure the sequence is aligned correctly with the query.
# First, I will find the length of the query. Then, if necessary,
# I will pad my current sequence with spaces so that they will line
# up correctly.
# Two possible things can happen:
# QUERY
# 504
#
# QUERY
# 403
#
# Sequence 504 will need padding at the end. Since I won't know
# this until the end of the alignment, this will be handled in
# end_alignment.
# Sequence 403 will need padding before being added to the alignment.
align = self._multiple_alignment.alignment # for convenience
align.append((name, start, seq, end))
# This is old code that tried to line up all the sequences
# in a multiple alignment by using the sequence titles as
# identifiers. The problem with this is that BLAST assigns
# different HSP's from the same sequence the same id. Thus,
# in one alignment block, there may be multiple sequences with
# the same id. I'm not sure how to handle this, so I'm not
# going to.
# # If the sequence is the query, then just add it.
# if name == 'QUERY':
# if len(align) == 0:
# align.append((name, start, seq))
# else:
# aname, astart, aseq = align[0]
# if name != aname:
# raise ValueError, "Query is not the first sequence"
# aseq = aseq + seq
# align[0] = aname, astart, aseq
# else:
# if len(align) == 0:
# raise ValueError, "I could not find the query sequence"
# qname, qstart, qseq = align[0]
#
# # Now find my sequence in the multiple alignment.
# for i in range(1, len(align)):
# aname, astart, aseq = align[i]
# if name == aname:
# index = i
# break
# else:
# # If I couldn't find it, then add a new one.
# align.append((None, None, None))
# index = len(align)-1
# # Make sure to left-pad it.
# aname, astart, aseq = name, start, ' '*(len(qseq)-len(seq))
#
# if len(qseq) != len(aseq) + len(seq):
# # If my sequences are shorter than the query sequence,
# # then I will need to pad some spaces to make them line up.
# # Since I've already right padded seq, that means aseq
# # must be too short.
# aseq = aseq + ' '*(len(qseq)-len(aseq)-len(seq))
# aseq = aseq + seq
# if astart is None:
# astart = start
# align[index] = aname, astart, aseq
def end_alignment(self):
# Remove trailing newlines
if self._alignment:
self._alignment.title = self._alignment.title.rstrip()
# This code is also obsolete. See note above.
# If there's a multiple alignment, I will need to make sure
# all the sequences are aligned. That is, I may need to
# right-pad the sequences.
# if self._multiple_alignment is not None:
# align = self._multiple_alignment.alignment
# seqlen = None
# for i in range(len(align)):
# name, start, seq = align[i]
# if seqlen is None:
# seqlen = len(seq)
# else:
# if len(seq) < seqlen:
# seq = seq + ' '*(seqlen - len(seq))
# align[i] = name, start, seq
# elif len(seq) > seqlen:
# raise ValueError, \
# "Sequence %s is longer than the query" % name
# Clean up some variables, if they exist.
try:
del self._seq_index
del self._seq_length
del self._start_index
del self._start_length
del self._name_length
except AttributeError:
pass
class _HSPConsumer(object):
def start_hsp(self):
self._hsp = Record.HSP()
def score(self, line):
self._hsp.bits, self._hsp.score = _re_search(
r"Score =\s*([0-9.e+]+) bits \(([0-9]+)\)", line,
"I could not find the score in line\n%s" % line)
self._hsp.score = _safe_float(self._hsp.score)
self._hsp.bits = _safe_float(self._hsp.bits)
x, y = _re_search(
r"Expect\(?(\d*)\)? = +([0-9.e\-|\+]+)", line,
"I could not find the expect in line\n%s" % line)
if x:
self._hsp.num_alignments = _safe_int(x)
else:
self._hsp.num_alignments = 1
self._hsp.expect = _safe_float(y)
def identities(self, line):
x, y = _re_search(
r"Identities = (\d+)\/(\d+)", line,
"I could not find the identities in line\n%s" % line)
self._hsp.identities = _safe_int(x), _safe_int(y)
self._hsp.align_length = _safe_int(y)
if 'Positives' in line:
x, y = _re_search(
r"Positives = (\d+)\/(\d+)", line,
"I could not find the positives in line\n%s" % line)
self._hsp.positives = _safe_int(x), _safe_int(y)
assert self._hsp.align_length == _safe_int(y)
if 'Gaps' in line:
x, y = _re_search(
r"Gaps = (\d+)\/(\d+)", line,
"I could not find the gaps in line\n%s" % line)
self._hsp.gaps = _safe_int(x), _safe_int(y)
assert self._hsp.align_length == _safe_int(y)
def strand(self, line):
self._hsp.strand = _re_search(
r"Strand\s?=\s?(\w+)\s?/\s?(\w+)", line,
"I could not find the strand in line\n%s" % line)
def frame(self, line):
# Frame can be in formats:
# Frame = +1
# Frame = +2 / +2
if '/' in line:
self._hsp.frame = _re_search(
r"Frame\s?=\s?([-+][123])\s?/\s?([-+][123])", line,
"I could not find the frame in line\n%s" % line)
else:
self._hsp.frame = _re_search(
r"Frame = ([-+][123])", line,
"I could not find the frame in line\n%s" % line)
# Match a space, if one is available. Masahir Ishikawa found a
# case where there's no space between the start and the sequence:
# Query: 100tt 101
# line below modified by Yair Benita, Sep 2004
# Note that the colon is not always present. 2006
_query_re = re.compile(r"Query(:?) \s*(\d+)\s*(.+) (\d+)")
def query(self, line):
m = self._query_re.search(line)
if m is None:
if line.strip() == "Query ------------------------------------------------------------":
# Special case - long gap relative to the subject,
# note there is no start/end present, cannot update those
self._hsp.query += "-" * 60
self._query_len = 60 # number of dashes
self._query_start_index = 13 # offset of first dash
return
raise ValueError("I could not find the query in line\n%s" % line)
# line below modified by Yair Benita, Sep 2004.
# added the end attribute for the query
colon, start, seq, end = m.groups()
seq = seq.strip()
self._hsp.query += seq
if self._hsp.query_start is None:
self._hsp.query_start = _safe_int(start)
# line below added by Yair Benita, Sep 2004.
# added the end attribute for the query
self._hsp.query_end = _safe_int(end)
# Get index for sequence start (regular expression element 3)
self._query_start_index = m.start(3)
self._query_len = len(seq)
def align(self, line):
seq = line[self._query_start_index:].rstrip()
if len(seq) < self._query_len:
# Make sure the alignment is the same length as the query
seq += ' ' * (self._query_len - len(seq))
elif len(seq) > self._query_len:
raise ValueError("Match is longer than the query in line\n%s"
% line)
self._hsp.match = self._hsp.match + seq
# To match how we do the query, cache the regular expression.
# Note that the colon is not always present.
_sbjct_re = re.compile(r"Sbjct(:?) \s*(\d+)\s*(.+) (\d+)")
def sbjct(self, line):
m = self._sbjct_re.search(line)
if m is None:
raise ValueError("I could not find the sbjct in line\n%s" % line)
colon, start, seq, end = m.groups()
# mikep 26/9/00
# On occasion, there is a blast hit with no subject match
# so far, it only occurs with 1-line short "matches"
# I have decided to let these pass as they appear
if not seq.strip():
seq = ' ' * self._query_len
else:
seq = seq.strip()
self._hsp.sbjct += seq
if self._hsp.sbjct_start is None:
self._hsp.sbjct_start = _safe_int(start)
self._hsp.sbjct_end = _safe_int(end)
if len(seq) != self._query_len:
raise ValueError(
"QUERY and SBJCT sequence lengths don't match (%i %r vs %i) in line\n%s"
% (self._query_len, self._hsp.query, len(seq), line))
del self._query_start_index # clean up unused variables
del self._query_len
def end_hsp(self):
pass
class _DatabaseReportConsumer(object):
def start_database_report(self):
self._dr = Record.DatabaseReport()
def database(self, line):
m = re.search(r"Database: (.+)$", line)
if m:
self._dr.database_name.append(m.group(1))
elif self._dr.database_name:
# This must be a continuation of the previous name.
self._dr.database_name[-1] = "%s%s" % (self._dr.database_name[-1],
line.strip())
def posted_date(self, line):
self._dr.posted_date.append(_re_search(
r"Posted date:\s*(.+)$", line,
"I could not find the posted date in line\n%s" % line))
def num_letters_in_database(self, line):
letters, = _get_cols(
line, (-1,), ncols=6, expected={2: "letters", 4: "database:"})
self._dr.num_letters_in_database.append(_safe_int(letters))
def num_sequences_in_database(self, line):
sequences, = _get_cols(
line, (-1,), ncols=6, expected={2: "sequences", 4: "database:"})
self._dr.num_sequences_in_database.append(_safe_int(sequences))
def ka_params(self, line):
self._dr.ka_params = [_safe_float(x) for x in line.split()]
def gapped(self, line):
self._dr.gapped = 1
def ka_params_gap(self, line):
self._dr.ka_params_gap = [_safe_float(x) for x in line.split()]
def end_database_report(self):
pass
class _ParametersConsumer(object):
def start_parameters(self):
self._params = Record.Parameters()
def matrix(self, line):
self._params.matrix = line[8:].rstrip()
def gap_penalties(self, line):
self._params.gap_penalties = [_safe_float(x) for x in _get_cols(
line, (3, 5), ncols=6, expected={2: "Existence:", 4: "Extension:"})]
def num_hits(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=11, expected={2: "Hits"})
self._params.num_hits = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=6, expected={2: "Hits"})
self._params.num_hits = _safe_int(x)
def num_sequences(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=9, expected={2: "Sequences:"})
self._params.num_sequences = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=4, expected={2: "Sequences:"})
self._params.num_sequences = _safe_int(x)
def num_extends(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=9, expected={2: "extensions:"})
self._params.num_extends = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=4, expected={2: "extensions:"})
self._params.num_extends = _safe_int(x)
def num_good_extends(self, line):
if '1st pass' in line:
x, = _get_cols(line, (-4,), ncols=10, expected={3: "extensions:"})
self._params.num_good_extends = _safe_int(x)
else:
x, = _get_cols(line, (-1,), ncols=5, expected={3: "extensions:"})
self._params.num_good_extends = _safe_int(x)
def num_seqs_better_e(self, line):
self._params.num_seqs_better_e, = _get_cols(
line, (-1,), ncols=7, expected={2: "sequences"})
self._params.num_seqs_better_e = _safe_int(
self._params.num_seqs_better_e)
def hsps_no_gap(self, line):
self._params.hsps_no_gap, = _get_cols(
line, (-1,), ncols=9, expected={3: "better", 7: "gapping:"})
self._params.hsps_no_gap = _safe_int(self._params.hsps_no_gap)
def hsps_prelim_gapped(self, line):
self._params.hsps_prelim_gapped, = _get_cols(
line, (-1,), ncols=9, expected={4: "gapped", 6: "prelim"})
self._params.hsps_prelim_gapped = _safe_int(
self._params.hsps_prelim_gapped)
def hsps_prelim_gap_attempted(self, line):
self._params.hsps_prelim_gapped_attempted, = _get_cols(
line, (-1,), ncols=10, expected={4: "attempted", 7: "prelim"})
self._params.hsps_prelim_gapped_attempted = _safe_int(
self._params.hsps_prelim_gapped_attempted)
def hsps_gapped(self, line):
self._params.hsps_gapped, = _get_cols(
line, (-1,), ncols=6, expected={3: "gapped"})
self._params.hsps_gapped = _safe_int(self._params.hsps_gapped)
def query_length(self, line):
self._params.query_length, = _get_cols(
line.lower(), (-1,), ncols=4, expected={0: "length", 2: "query:"})
self._params.query_length = _safe_int(self._params.query_length)
def database_length(self, line):
self._params.database_length, = _get_cols(
line.lower(), (-1,), ncols=4, expected={0: "length", 2: "database:"})
self._params.database_length = _safe_int(self._params.database_length)
def effective_hsp_length(self, line):
self._params.effective_hsp_length, = _get_cols(
line, (-1,), ncols=4, expected={1: "HSP", 2: "length:"})
self._params.effective_hsp_length = _safe_int(
self._params.effective_hsp_length)
def effective_query_length(self, line):
self._params.effective_query_length, = _get_cols(
line, (-1,), ncols=5, expected={1: "length", 3: "query:"})
self._params.effective_query_length = _safe_int(
self._params.effective_query_length)
def effective_database_length(self, line):
self._params.effective_database_length, = _get_cols(
line.lower(), (-1,), ncols=5, expected={1: "length", 3: "database:"})
self._params.effective_database_length = _safe_int(
self._params.effective_database_length)
def effective_search_space(self, line):
self._params.effective_search_space, = _get_cols(
line, (-1,), ncols=4, expected={1: "search"})
self._params.effective_search_space = _safe_int(
self._params.effective_search_space)
def effective_search_space_used(self, line):
self._params.effective_search_space_used, = _get_cols(
line, (-1,), ncols=5, expected={1: "search", 3: "used:"})
self._params.effective_search_space_used = _safe_int(
self._params.effective_search_space_used)
def frameshift(self, line):
self._params.frameshift = _get_cols(line, (4, 5), ncols=6,
expected={0: "frameshift", 2: "decay"})
def threshold(self, line):
if line[:2] == "T:":
# Assume it's an old style line like "T: 123"
self._params.threshold, = _get_cols(line, (1,), ncols=2,
expected={0: "T:"})
elif line[:28] == "Neighboring words threshold:":
self._params.threshold, = _get_cols(line, (3,), ncols=4,
expected={0: "Neighboring",
1: "words",
2: "threshold:"})
else:
raise ValueError("Unrecognised threshold line:\n%s" % line)
self._params.threshold = _safe_int(self._params.threshold)
def window_size(self, line):
if line[:2] == "A:":
self._params.window_size, = _get_cols(line, (1,), ncols=2,
expected={0: "A:"})
elif line[:25] == "Window for multiple hits:":
self._params.window_size, = _get_cols(line, (4,), ncols=5,
expected={0: "Window",
2: "multiple",
3: "hits:"})
else:
raise ValueError("Unrecognised window size line:\n%s" % line)
self._params.window_size = _safe_int(self._params.window_size)
def dropoff_1st_pass(self, line):
score, bits = _re_search(
r"X1: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the dropoff in line\n%s" % line)
self._params.dropoff_1st_pass = _safe_int(score), _safe_float(bits)
def gap_x_dropoff(self, line):
score, bits = _re_search(
r"X2: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the gap dropoff in line\n%s" % line)
self._params.gap_x_dropoff = _safe_int(score), _safe_float(bits)
def gap_x_dropoff_final(self, line):
score, bits = _re_search(
r"X3: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the gap dropoff final in line\n%s" % line)
self._params.gap_x_dropoff_final = _safe_int(score), _safe_float(bits)
def gap_trigger(self, line):
score, bits = _re_search(
r"S1: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the gap trigger in line\n%s" % line)
self._params.gap_trigger = _safe_int(score), _safe_float(bits)
def blast_cutoff(self, line):
score, bits = _re_search(
r"S2: (\d+) \(\s*([0-9,.]+) bits\)", line,
"I could not find the blast cutoff in line\n%s" % line)
self._params.blast_cutoff = _safe_int(score), _safe_float(bits)
def end_parameters(self):
pass
class _BlastConsumer(AbstractConsumer,
_HeaderConsumer,
_DescriptionConsumer,
_AlignmentConsumer,
_HSPConsumer,
_DatabaseReportConsumer,
_ParametersConsumer
):
# This Consumer inherits from many other consumer classes that handle
# the actual dirty work. An alternate way to do it is to create objects
# of those classes and then delegate the parsing tasks to them in a
# decorator-type pattern. The disadvantage of that is that the method
# names would need to be resolved in this class. However, using
# a decorator will retain more control in this class (which may or
# may not be a bad thing). In addition, having each sub-consumer as
# its own object prevents this object's dictionary from being cluttered
# with members and reduces the chance of member collisions.
def __init__(self):
self.data = None
def round(self, line):
# Make sure nobody's trying to pass me PSI-BLAST data!
raise ValueError("This consumer doesn't handle PSI-BLAST data")
def start_header(self):
self.data = Record.Blast()
_HeaderConsumer.start_header(self)
def end_header(self):
_HeaderConsumer.end_header(self)
self.data.__dict__.update(self._header.__dict__)
def end_descriptions(self):
self.data.descriptions = self._descriptions
def end_alignment(self):
_AlignmentConsumer.end_alignment(self)
if self._alignment.hsps:
self.data.alignments.append(self._alignment)
if self._multiple_alignment.alignment:
self.data.multiple_alignment = self._multiple_alignment
def end_hsp(self):
_HSPConsumer.end_hsp(self)
try:
self._alignment.hsps.append(self._hsp)
except AttributeError:
raise ValueError("Found an HSP before an alignment")
def end_database_report(self):
_DatabaseReportConsumer.end_database_report(self)
self.data.__dict__.update(self._dr.__dict__)
def end_parameters(self):
_ParametersConsumer.end_parameters(self)
self.data.__dict__.update(self._params.__dict__)
class _PSIBlastConsumer(AbstractConsumer,
_HeaderConsumer,
_DescriptionConsumer,
_AlignmentConsumer,
_HSPConsumer,
_DatabaseReportConsumer,
_ParametersConsumer
):
def __init__(self):
self.data = None
def start_header(self):
self.data = Record.PSIBlast()
_HeaderConsumer.start_header(self)
def end_header(self):
_HeaderConsumer.end_header(self)
self.data.__dict__.update(self._header.__dict__)
def start_descriptions(self):
self._round = Record.Round()
self.data.rounds.append(self._round)
_DescriptionConsumer.start_descriptions(self)
def end_descriptions(self):
_DescriptionConsumer.end_descriptions(self)
self._round.number = self._roundnum
if self._descriptions:
self._round.new_seqs.extend(self._descriptions)
self._round.reused_seqs.extend(self._model_sequences)
self._round.new_seqs.extend(self._nonmodel_sequences)
if self._converged:
self.data.converged = 1
def end_alignment(self):
_AlignmentConsumer.end_alignment(self)
if self._alignment.hsps:
self._round.alignments.append(self._alignment)
if self._multiple_alignment:
self._round.multiple_alignment = self._multiple_alignment
def end_hsp(self):
_HSPConsumer.end_hsp(self)
try:
self._alignment.hsps.append(self._hsp)
except AttributeError:
raise ValueError("Found an HSP before an alignment")
def end_database_report(self):
_DatabaseReportConsumer.end_database_report(self)
self.data.__dict__.update(self._dr.__dict__)
def end_parameters(self):
_ParametersConsumer.end_parameters(self)
self.data.__dict__.update(self._params.__dict__)
class Iterator(object):
"""Iterates over a file of multiple BLAST results.
Methods:
next Return the next record from the stream, or None.
"""
def __init__(self, handle, parser=None):
"""__init__(self, handle, parser=None)
Create a new iterator. handle is a file-like object. parser
is an optional Parser object to change the results into another form.
If set to None, then the raw contents of the file will be returned.
"""
try:
handle.readline
except AttributeError:
raise ValueError(
"I expected a file handle or file-like object, got %s"
% type(handle))
self._uhandle = File.UndoHandle(handle)
self._parser = parser
self._header = []
def __next__(self):
"""next(self) -> object
Return the next Blast record from the file. If no more records,
return None.
"""
lines = []
query = False
while True:
line = self._uhandle.readline()
if not line:
break
# If I've reached the next one, then put the line back and stop.
if lines and (line.startswith('BLAST') or
line.startswith('BLAST', 1) or
line.startswith('<?xml ')):
self._uhandle.saveline(line)
break
# New style files omit the BLAST line to mark a new query:
if line.startswith("Query="):
if not query:
if not self._header:
self._header = lines[:]
query = True
else:
# Start of another record
self._uhandle.saveline(line)
break
lines.append(line)
if query and "BLAST" not in lines[0]:
# Cheat and re-insert the header
lines = self._header + lines
if not lines:
return None
data = ''.join(lines)
if self._parser is not None:
return self._parser.parse(StringIO(data))
return data
if sys.version_info[0] < 3:
def next(self):
"""Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def __iter__(self):
return iter(self.__next__, None)
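# A usage sketch for files holding several concatenated reports
# (illustrative; "many_blasts.txt" is a hypothetical file, e.g. from
# blastall run on a multi-sequence FASTA input):
#
#     from Bio.Blast.NCBIStandalone import Iterator, BlastParser
#     with open("many_blasts.txt") as handle:
#         for record in Iterator(handle, BlastParser()):
#             print(record.query)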
def _re_search(regex, line, error_msg):
m = re.search(regex, line)
if not m:
raise ValueError(error_msg)
return m.groups()
def _get_cols(line, cols_to_get, ncols=None, expected=None):
if expected is None:
expected = {}
cols = line.split()
# Check to make sure number of columns is correct
if ncols is not None and len(cols) != ncols:
raise ValueError("I expected %d columns (got %d) in line\n%s"
% (ncols, len(cols), line))
# Check to make sure columns contain the correct data
for k in expected:
if cols[k] != expected[k]:
raise ValueError("I expected '%s' in column %d in line\n%s"
% (expected[k], k, line))
# Construct the answer tuple
results = []
for c in cols_to_get:
results.append(cols[c])
return tuple(results)
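# For example, for the "Number of letters" line format parsed above:
#     _get_cols("Number of letters in database: 223,339", (-1,),
#               ncols=6, expected={2: "letters", 4: "database:"})
# returns ("223,339",).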
def _safe_int(s):
try:
return int(s)
except ValueError:
# Something went wrong. Try to clean up the string.
# Remove all commas from the string
s = s.replace(',', '')
# try again after removing commas.
# Note int() will return a long rather than overflow
try:
return int(s)
except ValueError:
pass
# Call float to handle things like "54.3", note could lose precision, e.g.
# >>> int("5399354557888517312")
# 5399354557888517312
# >>> int(float("5399354557888517312"))
# 5399354557888517120
return int(float(s))
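# e.g. _safe_int("1,462,033,012") -> 1462033012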
def _safe_float(s):
# Thomas Rosleff Soerensen ([email protected]) noted that
# float('e-172') does not produce an error on his platform. Thus,
# we need to check the string for this condition.
# Sometimes BLAST leaves off the '1' in front of an exponent.
if s and s[0] in ['E', 'e']:
s = '1' + s
try:
return float(s)
except ValueError:
# Remove all commas from the string
s = s.replace(',', '')
# try again.
return float(s)
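# e.g. _safe_float("e-172") -> 1e-172 (the implied leading '1' is added)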
class _BlastErrorConsumer(_BlastConsumer):
def __init__(self):
_BlastConsumer.__init__(self)
def noevent(self, line):
if 'Query must be at least wordsize' in line:
raise ShortQueryBlastError("Query must be at least wordsize")
# Now pass the line back up to the superclass.
method = getattr(_BlastConsumer, 'noevent',
_BlastConsumer.__getattr__(self, 'noevent'))
method(line)
class BlastErrorParser(AbstractParser):
"""Attempt to catch and diagnose BLAST errors while parsing.
This utilizes the BlastParser module but adds an additional layer
of complexity on top of it by attempting to diagnose ValueErrors
that may actually indicate problems during BLAST parsing.
Current BLAST problems this detects are:
o LowQualityBlastError - When BLASTing really low quality sequences
(ie. some GenBank entries which are just short stretches of a single
nucleotide), BLAST will report an error with the sequence and be
unable to search with this. This will lead to a badly formatted
BLAST report that the parsers choke on. The parser will convert the
ValueError to a LowQualityBlastError and attempt to provide useful
information.
"""
def __init__(self, bad_report_handle=None):
"""Initialize a parser that tries to catch BlastErrors.
Arguments:
o bad_report_handle - An optional argument specifying a handle
where bad reports should be sent. This would allow you to save
all of the bad reports to a file, for instance. If no handle
is specified, the bad reports will not be saved.
"""
self._bad_report_handle = bad_report_handle
# self._b_parser = BlastParser()
self._scanner = _Scanner()
self._consumer = _BlastErrorConsumer()
def parse(self, handle):
"""Parse a handle, attempting to diagnose errors.
"""
results = handle.read()
try:
self._scanner.feed(StringIO(results), self._consumer)
except ValueError:
# if we have a bad_report_file, save the info to it first
if self._bad_report_handle:
# send the info to the error handle
self._bad_report_handle.write(results)
# now we want to try and diagnose the error
self._diagnose_error(
StringIO(results), self._consumer.data)
# if we got here we can't figure out the problem
# so we should pass along the syntax error we got
raise
return self._consumer.data
def _diagnose_error(self, handle, data_record):
"""Attempt to diagnose an error in the passed handle.
Arguments:
o handle - The handle potentially containing the error
o data_record - The data record partially created by the consumer.
"""
line = handle.readline()
while line:
# 'Searchingdone' instead of 'Searching......done' seems
# to indicate a failure to perform the BLAST due to
# low quality sequence
if line.startswith('Searchingdone'):
raise LowQualityBlastError("Blast failure occurred on query: ",
data_record.query)
line = handle.readline()
| mit | -429,756,769,175,448,800 | 39.843682 | 107 | 0.56711 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/user_interfaces/mpl_with_glade_316_sgskip.py | 1 | 1165 | """
=========================
Matplotlib With Glade 316
=========================
"""
from gi.repository import Gtk
from matplotlib.figure import Figure
from matplotlib.axes import Subplot
from numpy import arange, sin, pi
from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas
class Window1Signals(object):
def on_window1_destroy(self, widget):
Gtk.main_quit()
def main():
builder = Gtk.Builder()
builder.add_objects_from_file("mpl_with_glade_316.glade", ("window1", ""))
builder.connect_signals(Window1Signals())
window = builder.get_object("window1")
sw = builder.get_object("scrolledwindow1")
# Start of Matplotlib specific code
figure = Figure(figsize=(8, 6), dpi=71)
axis = figure.add_subplot(111)
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
axis.plot(t, s)
axis.set_xlabel('time [s]')
axis.set_ylabel('voltage [V]')
canvas = FigureCanvas(figure) # a Gtk.DrawingArea
canvas.set_size_request(800, 600)
sw.add_with_viewport(canvas)
# End of Matplotlib specific code
window.show_all()
Gtk.main()
if __name__ == "__main__":
main()
| mit | 7,270,442,403,777,310,000 | 24.326087 | 83 | 0.641202 | false |
asymptopia/stepintochinese | StepIntoChinese/cfgmgr.py | 1 | 1685 | """
/**********************************************************
Organization :AsymptopiaSoftware | Software@theLimit
Website :www.asymptopia.org
Author :Charles B. Cosse
Email :[email protected]
Copyright :(C) 2006-2011 Asymptopia Software
License :GPLv3
***********************************************************/
"""
import os,string,shutil,sys
from dict_formatter import *
class CfgMgr:
def __init__(self,parent_class):
self.parent_class=parent_class
self.master=os.path.join(parent_class.SITEPKGDIR,'.stepintochinese_config_master')
self.infname=os.path.join(parent_class.HOMEDIR,'.stepintochinese_config')
self.config=self.load_config()
def get_config(self):
return self.config
def load_config(self):
parent_class=self.parent_class
if not os.path.exists(self.infname):
if os.path.exists(self.master):
shutil.copy(self.master,self.infname)
else:
print 'could not find: %s'%(self.master)
sys.exit()
inf=open(self.infname)
config=eval(inf.read())
inf.close()
return config
def get_value(self,key):
if self.config.has_key(key):
return self.config[key]
else:
return None
def update_config(self,d):
#print 'CfgMgr: update_config\n',d
for key in d.keys():
self.config[key]=d[key]
self.save_config()
self.config=self.load_config()
def set_config(self,d):
#print 'CfgMgr: set_config\n',d
self.config=d
self.save_config()
self.config=self.load_config()
def save_config(self):
#print 'CfgMgr: save_config'
ouf=open(self.infname,'w')
rval=format_dict(self.config,0)
ouf.write(rval)
ouf.close()
| gpl-2.0 | -5,893,147,121,315,494,000 | 19.802469 | 84 | 0.619585 | false |
broadinstitute/toothpick | toothpick/test/hooks_test.py | 1 | 7294 | import toothpick
from toothpick import exceptions
from toothpick import hooks
from toothpick.test import TestResource
from nose.tools import *
hooks.register_hook('side_effect')
hooks.register_hook('too_many_yields')
hooks.register_hook('not_enough_yields')
hooks.register_hook('mixin')
class HookMixin(object):
@hooks.hook_provider('mixin')
def mixin_method(self):
self.calls.append('mixin')
@hooks.after_mixin
def after_mixin(self, *args, **kwargs):
self.calls.append('after_mixin')
class HookResource(TestResource):
def read(self, query, options=None):
return dict(test=1)
@HookResource.register("test", config=None)
class HookModel(HookMixin, toothpick.Base):
def __init__(self, *args, **kwargs):
self.calls = []
return super(HookModel, self).__init__(*args, **kwargs)
@hooks.before_save
def before_save(self, *args, **kwargs):
self.calls.append('before_save')
@hooks.after_save
def after_save(self, *args, **kwargs):
self.calls.append('after_save')
@hooks.before_validation
def before_validation(self, *args, **kwargs):
self.calls.append('before_validation')
@hooks.after_validation
def after_validation(self, *args, **kwargs):
self.calls.append('after_validation')
@hooks.before_create
def before_create(self, *args, **kwargs):
self.calls.append('before_create')
@hooks.after_create
def after_create(self, *args, **kwargs):
self.calls.append('after_create')
@hooks.before_update
def before_update(self, *args, **kwargs):
self.calls.append('before_update')
@hooks.after_update
def after_update(self, *args, **kwargs):
self.calls.append('after_update')
@hooks.before_delete
def before_delete(self, *args, **kwargs):
self.calls.append('before_delete')
@hooks.after_delete
def after_delete(self, *args, **kwargs):
self.calls.append('after_delete')
@hooks.before_modification
def before_modification(self, *args, **kwargs):
self.calls.append('before_modification')
@hooks.after_modification
def after_modification(self, *args, **kwargs):
self.calls.append('after_modification')
@hooks.after_find
def after_find(self, *args, **kwargs):
self.calls.append('after_find')
@hooks.after_init
def after_init(self, *args, **kwargs):
self.calls.append('after_init')
@hooks.hook_provider('side_effect')
def side_effect(self):
self.calls.append('side_effect')
@hooks.before_side_effect
def before_side_effect(self):
self.calls.append('before_side_effect')
@hooks.after_side_effect
def after_side_effect(self):
self.calls.append('after_side_effect')
@hooks.around_side_effect
def around_side_effect_1(self):
self.calls.append('around_before_side_effect_1')
yield
self.calls.append('around_after_side_effect_1')
@hooks.around_side_effect
def around_side_effect_2(self):
self.calls.append('around_before_side_effect_2')
yield
self.calls.append('around_after_side_effect_2')
@hooks.hook_provider('too_many_yields')
def too_many_yields(self):
self.calls.append('too_many_yields')
@hooks.around_too_many_yields
def lots_of_yields(self):
self.calls.append('before_yield_one')
yield
self.calls.append('before_yield_two')
yield
self.calls.append('after_yield_two')
@hooks.hook_provider('not_enough_yields')
def no_yield(self): pass
@hooks.around_not_enough_yields
def not_enough_yields(self):
self.calls.append('nothing')
class TestHooks(object):
def setup(self):
self.found_model = HookModel.find_one(test='found')
self.new_model = HookModel(dict(test={}))
self.clean_found_model = HookModel.find_one(test='clean')
self.clean_new_model = HookModel(dict(test={}))
del self.found_model.calls[:]
del self.new_model.calls[:]
def test_save_new(self):
self.new_model.save()
assert_equals(
self.new_model.calls,
[
'before_validation',
'after_validation',
'before_save',
'before_create',
'after_create',
'after_save'
]
)
def test_save_existing(self):
self.found_model.save()
assert_equals(
self.found_model.calls,
[
'before_validation',
'after_validation',
'before_save',
'before_update',
'after_update',
'after_save'
]
)
def test_validation(self):
self.found_model.valid()
assert_equals(
self.found_model.calls,
[
'before_validation',
'after_validation',
]
)
def test_modify(self):
self.found_model.add_field('test', name='fred')
assert_equals(
self.found_model.calls,
[
'before_modification',
'after_modification',
]
)
def test_delete(self):
self.found_model.delete()
assert_equals(
self.found_model.calls,
[
'before_delete',
'after_delete',
]
)
def test_find(self):
assert_equals(
self.clean_found_model.calls,
[
'after_init',
'after_find'
]
)
def test_init(self):
assert_equals(
self.clean_new_model.calls,
[
'after_init'
]
)
def test_custom_hook(self):
self.found_model.side_effect()
assert_equals(
self.found_model.calls,
[
'before_side_effect',
'around_before_side_effect_1',
'around_before_side_effect_2',
'side_effect',
'around_after_side_effect_2',
'around_after_side_effect_1',
'after_side_effect',
]
)
@raises(RuntimeError)
def test_around_too_many_yields(self):
self.found_model.too_many_yields()
@raises(RuntimeError)
def test_around_no_yield(self):
self.found_model.no_yield()
def test_without_hooks(self):
self.found_model.side_effect.without_hooks(self.found_model)
assert_equals(
self.found_model.calls,
[
'side_effect'
]
)
def test_mixin_hooks(self):
self.found_model.mixin_method()
assert_equals(self.found_model.calls,
[
'mixin',
'after_mixin'
]
)
def test_subclass(self):
class Subclass(HookModel):
pass
# if this were broken, the subclass would have no hooks declared
sub = Subclass()
for hooks in sub._hooks.values():
assert_not_equals([], hooks)
| bsd-3-clause | 1,105,850,361,673,157,600 | 25.523636 | 72 | 0.557444 | false |
OpenGeoscience/girder_db_items | server/dbs/sqlalchemydb.py | 1 | 19936 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import six
import sqlalchemy
import sqlalchemy.engine.reflection
import sqlalchemy.orm
import time
from six.moves import range
from girder import logger as log
from . import base
from .base import DatabaseConnectorException
MAX_SCHEMAS_IN_TABLE_LIST = 25
DatabaseOperators = {
'eq': '=',
'ne': '!=',
'gte': '>=',
'gt': '>',
'lte': '<=',
'lt': '<',
}
_enginePool = {}
_enginePoolMaxSize = 5
def getEngine(uri, **kwargs):
"""
Get a sqlalchemy engine from a pool in case we use the same parameters for
multiple connections.
"""
key = (uri, frozenset(six.viewitems(kwargs)))
engine = _enginePool.get(key)
if engine is None:
engine = sqlalchemy.create_engine(uri, **kwargs)
if len(_enginePool) >= _enginePoolMaxSize:
            _enginePool.clear()
_enginePool[key] = engine
return engine
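# Usage sketch (the in-memory SQLite URI is only an illustration):
#   engine1 = getEngine('sqlite://', echo=False)
#   engine2 = getEngine('sqlite://', echo=False)
#   # identical uri/kwargs reuse the pooled engine: engine1 is engine2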
class SQLAlchemyConnector(base.DatabaseConnector):
name = 'sqlalchemy'
def __init__(self, *args, **kwargs):
super(SQLAlchemyConnector, self).__init__(*args, **kwargs)
self.table = kwargs.get('table')
self.schema = kwargs.get('schema')
self.dbEngine = None
self.sessions = {}
# dbparams can include values in http://www.postgresql.org/docs/
# current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
self.dbparams = kwargs.get('dbparams', {})
self.databaseUri = self.adjustDBUri(kwargs.get('uri'))
# Additional parameters:
# idletime: seconds after which a connection is considered idle
# abandontime: seconds after which a connection will be abandoned
self.dbIdleTime = float(kwargs.get('idletime', 300))
self.dbAbandonTime = float(kwargs.get('abandontime',
self.dbIdleTime * 5))
self.databaseOperators = DatabaseOperators
self.fields = None
self.allowFieldFunctions = True
self.allowSortFunctions = True
self.allowFilterFunctions = True
self.initialized = True
self.types = {type: getattr(sqlalchemy, type) for type in dir(sqlalchemy)
if isinstance(getattr(sqlalchemy, type),
sqlalchemy.sql.visitors.VisitableType)}
class Table(object):
"""
This is used to handle table properties from SQLAlchemy.
"""
pass
self.tableClass = Table
self._allowedFunctions = {
'cast': True,
'count': True,
'distinct': True,
}
def _addFilter(self, filterList, filter):
"""
Add a filter to a list of SQLAlchemy filters.
:param filterList: a list of SQLAlchemy filters which is modified.
:param filter: information on the filter.
:return: the modified list.
"""
if 'group' in filter:
sublist = []
for subfilter in filter['value']:
sublist = self._addFilter(sublist, subfilter)
if filter['group'] == 'and':
filterList.append(sqlalchemy.and_(*sublist))
elif filter['group'] == 'or':
filterList.append(sqlalchemy.or_(*sublist))
return filterList
operator = filter['operator']
operator = base.FilterOperators.get(operator, operator)
operator = self.databaseOperators.get(operator, operator)
field = self._convertFieldOrFunction(filter['field'])
negate = False
if operator.startswith('not_'):
negate = True
operator = operator.split('not_', 1)[1]
if operator == 'in':
values = filter['value']
if not isinstance(values, (list, tuple)):
values = [values]
values = [self._convertFieldOrFunction(value, True)
for value in values]
opfunc = field.in_(values)
elif operator == 'is':
value = self._convertFieldOrFunction(filter['value'], True)
opfunc = field.is_(value)
else:
value = self._convertFieldOrFunction(filter['value'], True)
opfunc = field.op(operator)(value)
if negate:
opfunc = sqlalchemy.not_(opfunc)
filterList.append(opfunc)
return filterList
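    # Example filter structures _addFilter accepts (a sketch inferred from the
    # code above; the field names are hypothetical):
    #   {'field': 'age', 'operator': 'gte', 'value': 21}
    #   {'group': 'or', 'value': [
    #       {'field': 'state', 'operator': 'eq', 'value': 'NY'},
    #       {'field': 'state', 'operator': 'eq', 'value': 'CA'}]}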
def _convertFieldOrFunction(self, fieldOrFunction, preferValue=False):
"""
Convert a string to a column reference, or a dictionary to a column or
function reference. If a function is passed, this should be a
canonical function reference ('func' and 'param' are both populated).
:param fieldOrFunction: a string with a column name or a dictionary
with either a field, function, or value.
:param preferValue: if True then if fieldOrFunction is not a
dictionary, return it unchanged.
:returns: a constructed column or function object, or a bare value.
"""
if not isinstance(fieldOrFunction, dict):
if preferValue:
return fieldOrFunction
return getattr(self.tableClass, fieldOrFunction)
if 'field' in fieldOrFunction:
return getattr(self.tableClass, fieldOrFunction['field'])
if 'value' in fieldOrFunction:
if not preferValue:
return sqlalchemy.sql.elements.literal(
fieldOrFunction['value'])
return fieldOrFunction['value']
fieldOrFunction = self.isFunction(fieldOrFunction)
if fieldOrFunction is False:
raise DatabaseConnectorException('Not a function')
if not self._isFunctionAllowed(fieldOrFunction['func']):
raise DatabaseConnectorException('Function %s is not allowed' %
fieldOrFunction['func'])
param = fieldOrFunction.get('param', fieldOrFunction.get('params', []))
# Determine the function we need to call to apply the function
if fieldOrFunction['func'] in ('distinct', 'cast'):
if (fieldOrFunction['func'] == 'cast' and len(param) == 2 and
isinstance(param[1], dict) and 'value' in param[1]):
param[1]['value'] = self.types.get(param[1]['value'], param[1]['value'])
funcfunc = getattr(sqlalchemy, fieldOrFunction['func'])
else:
funcfunc = getattr(sqlalchemy.func, fieldOrFunction['func'])
return funcfunc(
*[self._convertFieldOrFunction(entry, True) for entry in param])
def _isFunctionAllowed(self, proname):
"""
Check if the specified function is allowed. Currently, only
non-volatile functions are allowed, even though there are volatile
functions that are harmless.
:param proname: name of the function to check.
:returns: True is allowed, False is not.
"""
return self._allowedFunctions.get(proname, False)
@classmethod
def adjustDBUri(cls, uri):
"""
Adjust a uri to match the form sqlalchemy requires. In general, the
uri is of the form
dialect+driver://username:password@host:port/database.
:param uri: the uri to adjust.
:returns: the adjusted uri
"""
# If we specifically ask for a URI starting with sqlalchemy: (e.g.,
# sqlalchemy:postgresql://127.0.0.1/database), use this generic class
# rather than our specific sqlalchemy class.
if uri.startswith('sqlalchemy:'):
uri = uri.split('sqlalchemy:', 1)[1]
else:
dialect, _ = base.getDBConnectorClassFromDialect(uri)
uri = '%s://%s' % (dialect, uri.split('://', 1)[1])
return uri
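    # Example (a sketch): an explicit sqlalchemy: prefix is stripped verbatim,
    # e.g. 'sqlalchemy:postgresql://127.0.0.1/db' -> 'postgresql://127.0.0.1/db';
    # other uris have their dialect normalized via getDBConnectorClassFromDialect.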
def connect(self, client=None):
"""
Connect to the database.
        :param client: if None, use a new session.  If specified and this
                       client is currently marked in use, cancel the client's
                       existing query and return a connection from the pool
                       for the client to use.
        :return: a SQLAlchemy session object.
"""
if not self.dbEngine:
engine = getEngine(self.databaseUri, **self.dbparams)
metadata = sqlalchemy.MetaData(engine)
table = sqlalchemy.Table(self.table, metadata, schema=self.schema,
autoload=True)
# The orm.mapper is used to refer to our columns. If the table or
# view we are connecting to does not have any primary keys, the
# mapper will fail. Use the first column as a fallback; this is
# only safe because we DON'T alter data; we have no guarantee we
# can refer to a specific row (but we don't need to).
fallbackPrimaryCol = None
for col in table.c:
if col.primary_key:
fallbackPrimaryCol = None
break
if fallbackPrimaryCol is None:
fallbackPrimaryCol = col
sqlalchemy.orm.mapper(
self.tableClass, table, primary_key=fallbackPrimaryCol)
self.dbEngine = engine
# If we are asking for a specific client, clean up defunct clients
curtime = time.time()
if client:
for oldsess in list(self.sessions):
idle = curtime - self.sessions[oldsess]['last']
if ((idle > self.dbIdleTime and
not self.sessions[oldsess]['used']) or
idle > self.dbAbandonTime):
# Close the session. sqlalchemy keeps them too long
# otherwise
self.sessions[oldsess]['session'].close()
del self.sessions[oldsess]
# Cancel an existing query
if client in self.sessions and self.sessions[client]['used']:
self.sessions[client]['session'].connection().connection.cancel()
self.sessions[client]['session'].rollback()
self.sessions[client]['used'] = False
if client in self.sessions:
sess = self.sessions[client]['session']
# Always ensure a fresh query
sess.rollback()
else:
sess = sqlalchemy.orm.sessionmaker(bind=self.dbEngine)()
# This is a further guard against changing the database. It isn't
# a real guard against change, as if we somehow allow an injection
# attack, it could be turned off. Also, volatile functions can
# still have side effects (for instance, setseed() changes the
# state for generating random numbers which could have
# cryptographic implications).
self.setSessionReadOnly(sess)
if client:
if client not in self.sessions:
self.sessions[client] = {}
self.sessions[client]['used'] = True
self.sessions[client]['last'] = curtime
self.sessions[client]['session'] = sess
return sess
def disconnect(self, db, client=None):
"""
Mark that a client has finished with a database connection and it can
be closed or reused without any issue.
:param db: the database connection to mark as finished.
:param client: the client that owned this connection.
"""
if client in self.sessions:
self.sessions[client]['used'] = False
else:
# Close the session. sqlalchemy keeps them too long otherwise
db.close()
def setSessionReadOnly(self, sess):
"""
Set the specified session to read only if possible. Subclasses should
implement the appropriate behavior.
:param sess: the session to adjust.
"""
pass
def getFieldInfo(self):
"""
Return a list of fields that are known and can be queried.
:return: a list of known fields. Each entry is a dictionary with name,
datatype, and optionally a description.
"""
if self.fields is not None:
return self.fields
db = self.connect()
fields = []
for column in sqlalchemy.orm.class_mapper(
self.tableClass).iterate_properties:
if (isinstance(column, sqlalchemy.orm.ColumnProperty) and
len(column.columns) == 1):
try:
coltype = str(column.columns[0].type)
except sqlalchemy.exc.CompileError:
coltype = 'unknown'
fields.append({
'name': column.key,
'type': coltype
})
self.disconnect(db)
if len(fields):
self.fields = fields
return fields
@classmethod
def getTableList(cls, uri, internalTables=False, dbparams={}, **kwargs):
"""
Get a list of known databases, each of which has a list of known tables
from the database. This is of the form [{'database': (database),
'tables': [{'schema': (schema), 'table': (table 1)}, ...]}]
:param uri: uri to connect to the database.
        :param internalTables: True to return tables about the database itself.
:param dbparams: optional parameters to send to the connection.
:returns: A list of known tables.
"""
dbEngine = sqlalchemy.create_engine(cls.adjustDBUri(uri), **dbparams)
insp = sqlalchemy.engine.reflection.Inspector.from_engine(dbEngine)
schemas = insp.get_schema_names()
defaultSchema = insp.default_schema_name
tables = [{'name': table, 'table': table}
for table in dbEngine.table_names()]
tables.extend([{'name': view, 'table': view}
for view in insp.get_view_names()])
databaseName = base.databaseFromUri(uri)
results = [{'database': databaseName, 'tables': tables}]
if len(schemas) <= MAX_SCHEMAS_IN_TABLE_LIST:
for schema in schemas:
if not internalTables and schema.lower() == 'information_schema':
continue
if schema != defaultSchema:
tables = [{'name': '%s.%s' % (schema, table),
'table': table, 'schema': schema}
for table in dbEngine.table_names(schema=schema)]
tables.extend([{'name': '%s.%s' % (schema, view),
'table': view, 'schema': schema}
for view in insp.get_view_names(schema=schema)])
results[0]['tables'].extend(tables)
else:
log.info('Not enumerating all schemas for table list (%d schemas)', len(schemas))
return results
def performSelect(self, fields, queryProps={}, filters=[], client=None):
"""
Perform a select query. The results are passed back as a dictionary
with the following values:
limit: the limit used in the query
offset: the offset used in the query
sort: the list of sort parameters used in the query.
fields: a list of the fields that are being returned in the order
that they are returned.
data: a list with one entry per row of results. Each entry is a list
with one entry per column.
:param fields: the results from getFieldInfo.
:param queryProps: general query properties, including limit, offset,
and sort.
:param filters: a list of filters to apply.
:param client: if a client is specified, a previous query made by this
client can be cancelled.
:return: the results of the query. See above.
"""
if queryProps.get('fields') is None:
queryProps['fields'] = [field['name'] for field in fields]
result = {
'limit': queryProps.get('limit'),
'offset': queryProps.get('offset'),
'sort': queryProps.get('sort'),
'fields': queryProps.get('fields'),
'data': []
}
sess = self.connect(client)
query = sess.query(self.tableClass)
filterQueries = []
for filter in filters:
filterQueries = self._addFilter(filterQueries, filter)
if len(filterQueries):
query = query.filter(sqlalchemy.and_(*filterQueries))
if queryProps.get('group'):
groups = [self._convertFieldOrFunction(field)
for field in queryProps['group']]
if len(groups):
query = query.group_by(*groups)
if queryProps.get('sort'):
sortList = []
for pos in range(len(queryProps['sort'])):
sort = queryProps['sort'][pos]
sortCol = self._convertFieldOrFunction(sort[0])
if sort[1] == -1:
sortCol = sortCol.desc()
sortList.append(sortCol)
query = query.order_by(*sortList)
if (queryProps.get('limit') is not None and
int(queryProps['limit']) >= 0):
query = query.limit(int(queryProps['limit']))
if 'offset' in queryProps:
query = query.offset(int(queryProps['offset']))
columns = [self._convertFieldOrFunction(field)
for field in queryProps['fields']]
# Clone the query and set it to return the columns we are interested
# in. Using result['data'] = list(query.values(*columns)) is more
# compact and skips one internal _clone call, but doesn't allow logging
# the actual sql used. with_entities clears the columns we are
# selecting (it defaults to all of the native table columns), and
# add_columns puts back just what we want, including expressions.
query = query.with_entities(*[])
query = query.add_columns(*columns)
log.info('Query: %s', ' '.join(str(query.statement.compile(
bind=sess.get_bind(),
compile_kwargs={'literal_binds': True})).split()))
result['data'] = list(query)
self.disconnect(sess, client)
return result
@staticmethod
def validate(table=None, **kwargs):
"""
Validate that the passed arguments are sufficient for connecting to the
database.
:returns: True if the arguments should allow connecting to the db.
"""
if not table or not kwargs.get('uri'):
return False
# We could validate other database parameters, too
return True
# Make a list of the dialects this module supports. There is no default
# dialect.
_dialects = {
'dialects': {},
'priority': 1,
}
for dialect in getattr(sqlalchemy.dialects, '__all__', []):
_dialects['dialects'][dialect] = dialect
base.registerConnectorClass(SQLAlchemyConnector.name, SQLAlchemyConnector,
_dialects)
| apache-2.0 | 3,844,353,940,656,065,000 | 40.620042 | 93 | 0.578953 | false |
0111001101111010/hyde | hyde/tests/test_model.py | 1 | 5339 | # -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from hyde.model import Config, Expando
from fswrap import File, Folder
def test_expando_one_level():
d = {"a": 123, "b": "abc"}
x = Expando(d)
assert x.a == d['a']
assert x.b == d['b']
def test_expando_two_levels():
d = {"a": 123, "b": {"c": 456}}
x = Expando(d)
assert x.a == d['a']
assert x.b.c == d['b']['c']
def test_expando_three_levels():
d = {"a": 123, "b": {"c": 456, "d": {"e": "abc"}}}
x = Expando(d)
assert x.a == d['a']
assert x.b.c == d['b']['c']
assert x.b.d.e == d['b']['d']['e']
def test_expando_update():
d1 = {"a": 123, "b": "abc"}
x = Expando(d1)
assert x.a == d1['a']
assert x.b == d1['b']
d = {"b": {"c": 456, "d": {"e": "abc"}}, "f": "lmn"}
x.update(d)
assert x.a == d1['a']
assert x.b.c == d['b']['c']
assert x.b.d.e == d['b']['d']['e']
assert x.f == d["f"]
d2 = {"a": 789, "f": "opq"}
y = Expando(d2)
x.update(y)
assert x.a == 789
assert x.f == "opq"
def test_expando_to_dict():
d = {"a": 123, "b": {"c": 456, "d": {"e": "abc"}}}
x = Expando(d)
assert d == x.to_dict()
def test_expando_to_dict_with_update():
d1 = {"a": 123, "b": "abc"}
x = Expando(d1)
d = {"b": {"c": 456, "d": {"e": "abc"}}, "f": "lmn"}
x.update(d)
expected = {}
expected.update(d1)
expected.update(d)
assert expected == x.to_dict()
d2 = {"a": 789, "f": "opq"}
y = Expando(d2)
x.update(y)
expected.update(d2)
assert expected == x.to_dict()
TEST_SITE = File(__file__).parent.child_folder('_test')
import yaml
class TestConfig(object):
@classmethod
def setup_class(cls):
cls.conf1 = """
mode: development
content_root: stuff # Relative path from site root
media_root: media # Relative path from site root
media_url: /media
widgets:
plugins:
aggregators:
"""
cls.conf2 = """
mode: development
deploy_root: ~/deploy_site
content_root: site/stuff # Relative path from site root
media_root: mmm # Relative path from site root
media_url: /media
widgets:
plugins:
aggregators:
"""
def setUp(self):
TEST_SITE.make()
TEST_SITE.parent.child_folder('sites/test_jinja').copy_contents_to(TEST_SITE)
def tearDown(self):
TEST_SITE.delete()
def test_default_configuration(self):
c = Config(sitepath=TEST_SITE, config_dict={})
for root in ['content', 'layout']:
name = root + '_root'
path = name + '_path'
assert hasattr(c, name)
assert getattr(c, name) == root
assert hasattr(c, path)
assert getattr(c, path) == TEST_SITE.child_folder(root)
assert c.media_root_path == c.content_root_path.child_folder('media')
assert hasattr(c, 'plugins')
assert len(c.plugins) == 0
assert hasattr(c, 'ignore')
assert c.ignore == ["*~", "*.bak", ".hg", ".git", ".svn"]
assert c.deploy_root_path == TEST_SITE.child_folder('deploy')
assert c.not_found == '404.html'
assert c.meta.nodemeta == 'meta.yaml'
def test_conf1(self):
c = Config(sitepath=TEST_SITE, config_dict=yaml.load(self.conf1))
assert c.content_root_path == TEST_SITE.child_folder('stuff')
def test_conf2(self):
c = Config(sitepath=TEST_SITE, config_dict=yaml.load(self.conf2))
assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
assert c.media_root_path == c.content_root_path.child_folder('mmm')
assert c.media_url == TEST_SITE.child_folder('/media')
assert c.deploy_root_path == Folder('~/deploy_site')
def test_read_from_file_by_default(self):
File(TEST_SITE.child('site.yaml')).write(self.conf2)
c = Config(sitepath=TEST_SITE)
assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
assert c.media_root_path == c.content_root_path.child_folder('mmm')
assert c.media_url == TEST_SITE.child_folder('/media')
assert c.deploy_root_path == Folder('~/deploy_site')
def test_read_from_specified_file(self):
File(TEST_SITE.child('another.yaml')).write(self.conf2)
c = Config(sitepath=TEST_SITE, config_file='another.yaml')
assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
assert c.media_root_path == c.content_root_path.child_folder('mmm')
assert c.media_url == TEST_SITE.child_folder('/media')
assert c.deploy_root_path == Folder('~/deploy_site')
def test_extends(self):
another = """
extends: site.yaml
mode: production
media_root: xxx
"""
File(TEST_SITE.child('site.yaml')).write(self.conf2)
File(TEST_SITE.child('another.yaml')).write(another)
c = Config(sitepath=TEST_SITE, config_file='another.yaml')
assert c.mode == 'production'
assert c.content_root_path == TEST_SITE.child_folder('site/stuff')
assert c.media_root_path == c.content_root_path.child_folder('xxx')
assert c.media_url == TEST_SITE.child_folder('/media')
assert c.deploy_root_path == Folder('~/deploy_site')
| mit | -7,763,387,247,039,466,000 | 32.36875 | 85 | 0.561341 | false |
lingdb/CoBL-public | ielex/lexicon/migrations/306_0127_fix_issue_223.py | 1 | 1524 | # -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
from ielex.source_scripts.handle_duplicate_sources import handle_sources, \
sourcesExist
sources_changes = {'merge': {369: [362],
157: [272],
78: [39],
33: [71],
93: [94],
236: [298],
44: [45],
365: [366],
368: [370],
84: [400],
423: [424, 425, 461]},
'delete': [21],
'deprecate': [42, 43, 44, 57, 58, 61, 64, 87, 88, 89,
106, 107, 108, 109, 110, 111, 112, 113,
114, 115, 117, 118, 119, 120, 123, 139,
233]
}
def forwards_func(apps, schema_editor):
Source = apps.get_model('lexicon', 'Source')
if sourcesExist(sources_changes, Source):
handle_sources(sources_changes)
def reverse_func(apps, schema_editor):
print('Reverse of 306_0127_fix_issue223 does nothing.')
class Migration(migrations.Migration):
dependencies = [('lexicon', '306_0126_auto_20161027_0315')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
| bsd-2-clause | 3,087,492,751,820,296,700 | 33.636364 | 75 | 0.468504 | false |
MikeHoffert/caladbolg-engine | caladbolg/agents/character.py | 1 | 2442 | import json
from caladbolg.agents import formulas
from caladbolg.agents.stats import Stats, EquipmentStats
class Character:
"""
Represents a player character.
Characters have stats, equipment, and leveling information.
"""
def __init__(self, character_file):
self.level = 1
self.experience_into_level = 0
self.name = None
self.equipment_classes = None
self.stats = None
self.leveling_formula = None
self.load_character_file(character_file)
def load_character_file(self, character_file):
"""
Loads a character JSON file, setting the name, equipment classes, and stats.
The ``name`` is naturally the character's name as it should appear in-game. While there's no hard limit, it
should be kept short to prevent it from being truncated or overlapping.
The ``equipment_classes`` is a set of string "classes" corresponding to which classes of equipment can
be equipped. Each piece of equipment has one or more classes. If there's at least one class in common, we can
equip the piece of equipment. The classes are not mentioned in-game, as the naming is intended to be internal.
The ``stats`` is the character's ``Stats`` instance. The equipment stats are not initialized yet as they are
not known at this time.
:param character_file: The file to load the character data from.
"""
character_json = json.load(character_file)
self.name = character_json['name']
self.equipment_classes = character_json['equipment_classes']
self.stats = Stats(character_json['base_stats']['health'],
character_json['base_stats']['stamina'],
character_json['base_stats']['strength'],
character_json['base_stats']['magic'],
character_json['base_stats']['endurance'],
character_json['base_stats']['agility'])
try:
self.leveling_formula = formulas.leveling_formulas[character_json['leveling_formula']]
except KeyError:
raise ValueError('No leveling formula named "{0}" ({1})'.format(character_json['leveling_formula'],
character_file.name)) from None
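    # A minimal character file this loader would accept (a sketch: the keys
    # mirror the attributes read above, the concrete values are invented and
    # 'standard' must name an entry in formulas.leveling_formulas):
    #
    # {
    #     "name": "Aeris",
    #     "equipment_classes": ["sword", "light_armor"],
    #     "base_stats": {"health": 100, "stamina": 50, "strength": 12,
    #                    "magic": 8, "endurance": 10, "agility": 9},
    #     "leveling_formula": "standard"
    # }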
def __str__(self):
return str(self.__dict__)
| mit | 2,006,252,693,176,594,400 | 44.075472 | 118 | 0.613432 | false |
analyseuc3m/ANALYSE-v1 | common/test/acceptance/tests/studio/test_studio_home.py | 1 | 7558 | """
Acceptance tests for Home Page (My Courses / My Libraries).
"""
from bok_choy.web_app_test import WebAppTest
from opaque_keys.edx.locator import LibraryLocator
from uuid import uuid4
from ...fixtures import PROGRAMS_STUB_URL
from ...fixtures.config import ConfigModelFixture
from ...fixtures.programs import ProgramsFixture
from ...pages.studio.auto_auth import AutoAuthPage
from ...pages.studio.library import LibraryEditPage
from ...pages.studio.index import DashboardPage, DashboardPageWithPrograms
from ...pages.lms.account_settings import AccountSettingsPage
from ..helpers import (
select_option_by_text,
get_selected_option_text
)
class CreateLibraryTest(WebAppTest):
"""
Test that we can create a new content library on the studio home page.
"""
def setUp(self):
"""
Load the helper for the home page (dashboard page)
"""
super(CreateLibraryTest, self).setUp()
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPage(self.browser)
def test_create_library(self):
"""
From the home page:
Click "New Library"
Fill out the form
Submit the form
We should be redirected to the edit view for the library
Return to the home page
The newly created library should now appear in the list of libraries
"""
unique_suffix = uuid4().hex[:4]
name = "New Library Name " + unique_suffix
org = "TestOrgX" + unique_suffix
number = "TESTLIB_" + unique_suffix
self.auth_page.visit()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.has_library(name=name, org=org, number=number))
self.assertTrue(self.dashboard_page.has_new_library_button())
self.dashboard_page.click_new_library()
self.assertTrue(self.dashboard_page.is_new_library_form_visible())
self.dashboard_page.fill_new_library_form(name, org, number)
self.assertTrue(self.dashboard_page.is_new_library_form_valid())
self.dashboard_page.submit_new_library_form()
# The next page is the library edit view; make sure it loads:
lib_page = LibraryEditPage(self.browser, LibraryLocator(org, number))
lib_page.wait_for_page()
# Then go back to the home page and make sure the new library is listed there:
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.has_library(name=name, org=org, number=number))
class DashboardProgramsTabTest(WebAppTest):
"""
Test the programs tab on the studio home page.
"""
def setUp(self):
super(DashboardProgramsTabTest, self).setUp()
ProgramsFixture().install_programs([])
self.auth_page = AutoAuthPage(self.browser, staff=True)
self.dashboard_page = DashboardPageWithPrograms(self.browser)
self.auth_page.visit()
def set_programs_api_configuration(self, is_enabled=False, api_version=1, api_url=PROGRAMS_STUB_URL,
js_path='/js', css_path='/css'):
"""
Dynamically adjusts the programs API config model during tests.
"""
ConfigModelFixture('/config/programs', {
'enabled': is_enabled,
'enable_studio_tab': is_enabled,
'enable_student_dashboard': is_enabled,
'api_version_number': api_version,
'internal_service_url': api_url,
'public_service_url': api_url,
'authoring_app_js_path': js_path,
'authoring_app_css_path': css_path,
'cache_ttl': 0
}).install()
def test_tab_is_disabled(self):
"""
The programs tab and "new program" button should not appear at all
unless enabled via the config model.
"""
self.set_programs_api_configuration()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.is_programs_tab_present())
self.assertFalse(self.dashboard_page.is_new_program_button_present())
def test_tab_is_enabled_with_empty_list(self):
"""
The programs tab and "new program" button should appear when enabled
via config. When the programs list is empty, a button should appear
that allows creating a new program.
"""
self.set_programs_api_configuration(True)
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.is_programs_tab_present())
self.assertTrue(self.dashboard_page.is_new_program_button_present())
results = self.dashboard_page.get_program_list()
self.assertEqual(results, [])
self.assertTrue(self.dashboard_page.is_empty_list_create_button_present())
def test_tab_is_enabled_with_nonempty_list(self):
"""
The programs tab and "new program" button should appear when enabled
via config, and the results of the program list should display when
the list is nonempty.
"""
test_program_values = [('first program', 'org1'), ('second program', 'org2')]
ProgramsFixture().install_programs(test_program_values)
self.set_programs_api_configuration(True)
self.dashboard_page.visit()
self.assertTrue(self.dashboard_page.is_programs_tab_present())
self.assertTrue(self.dashboard_page.is_new_program_button_present())
results = self.dashboard_page.get_program_list()
self.assertEqual(results, test_program_values)
self.assertFalse(self.dashboard_page.is_empty_list_create_button_present())
def test_tab_requires_staff(self):
"""
The programs tab and "new program" button will not be available, even
when enabled via config, if the user is not global staff.
"""
self.set_programs_api_configuration(True)
AutoAuthPage(self.browser, staff=False).visit()
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.is_programs_tab_present())
self.assertFalse(self.dashboard_page.is_new_program_button_present())
class StudioLanguageTest(WebAppTest):
""" Test suite for the Studio Language """
def setUp(self):
super(StudioLanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.account_settings = AccountSettingsPage(self.browser)
AutoAuthPage(self.browser).visit()
def test_studio_language_change(self):
"""
Scenario: Ensure that language selection is working fine.
First I go to the user dashboard page in studio. I can see 'English' is selected by default.
Then I choose 'Dummy Language' from drop down (at top of the page).
Then I visit the student account settings page and I can see the language has been updated to 'Dummy Language'
in both drop downs.
"""
dummy_language = u'Dummy Language (Esperanto)'
self.dashboard_page.visit()
language_selector = self.dashboard_page.language_selector
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
select_option_by_text(language_selector, dummy_language)
self.dashboard_page.wait_for_ajax()
self.account_settings.visit()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), dummy_language)
self.assertEqual(
get_selected_option_text(language_selector),
u'Dummy Language (Esperanto)'
)
| agpl-3.0 | 5,568,737,444,139,585,000 | 40.527473 | 118 | 0.656258 | false |
Parclytaxel/Kinross | kinback/algebra.py | 1 | 1782 | # Numerical algebra and methods
# Parcly Taxel / Jeremy Tan, 2018
# https://parclytaxel.tumblr.com
import numpy as np
cc_abscissas = [(np.cos(np.arange(1, 2 ** k, 2) / 2 ** k * np.pi) + 1) / 2 for k in range(2, 11)]
def ccw_generate(n):
"""Clenshaw-Curtis weights for n+1 samples where n is a power of two.
DFT-based algorithm from Jörg Waldvogel (http://www.sam.math.ethz.ch/~waldvoge/Papers/fejer.pdf)."""
w0 = 1 / (n ** 2 - 1)
v = [2 / (1 - 4 * k ** 2) - w0 for k in range(n // 2)]
dft = np.fft.rfft(v + [-3 * w0] + v[:0:-1]).real / n
return np.append(dft, dft[-2::-1]) # ensures mathematically guaranteed symmetry of returned array
cc_weights = [ccw_generate(2 ** k) for k in range(2, 11)]
def ccquad(f, a, b):
"""Clenshaw-Curtis quadrature of f in [a, b]. f must be applicable elementwise to NumPy arrays (if not, use vectorize first)."""
fs = [f(a), f((a + b) / 2), f(b)]
res = (fs[0] + 4 * fs[1] + fs[2]) / 3
for q in range(9):
fs = np.insert(fs, range(1, len(fs)), f((b - a) * cc_abscissas[q] + a))
prev, res = res, np.dot(cc_weights[q], fs)
if abs(res - prev) <= 1e-12 * abs(prev): break
return (b - a) / 2 * res
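# Example (a sketch): integrate sin over [0, pi]; the adaptive doubling above
# converges to the exact value 2 well within the 1e-12 relative tolerance.
#   >>> round(ccquad(np.sin, 0.0, np.pi), 10)
#   2.0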
def newton(f, fp, x0, y = 0):
"""Newton's method for solving f(x) = y. fp is the derivative of f."""
x = x0
for q in range(16):
denom = fp(x)
if abs(denom) == 0: break
delta = (y - f(x)) / denom
x += delta
if abs(delta) < 1e-12 * abs(x): break
return x
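# Example (a sketch): solve x**2 = 2 for the positive root.
#   >>> round(newton(lambda x: x * x, lambda x: 2 * x, 1.0, y=2), 6)
#   1.414214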
def quadricl2m(p):
"""Converts the list representation of a quadric, [a, b, c, d, e, f] with with ax²+bxy+cy²+dx+ey+f=0, into the symmetric matrix representation."""
return np.array([[p[0], p[1] / 2, p[3] / 2], [p[1] / 2, p[2], p[4] / 2], [p[3] / 2, p[4] / 2, p[5]]])
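# Example (a sketch): the unit circle x^2 + y^2 - 1 = 0, i.e. [1, 0, 1, 0, 0, -1],
# maps to the diagonal matrix diag(1, 1, -1).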
| mit | -3,334,467,633,558,380,000 | 44.615385 | 150 | 0.562114 | false |
StarfruitStack/crabapple | crabapple/admin/controller/deployment.py | 1 | 2202 | # Copyright 2014 The crabapple Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import datetime
from flask import render_template, redirect, request
from crabapple.objects import Commit, Deployment, DeploymentStatus
class ControllerDeployment(object):
def __init__(self, server):
self.server = server
def view_deployments(self):
return render_template('index.html',
deployments=self.server.store.get_all_deployments(),
specs={o.id: o for o in self.server.store.get_all_specs()})
def view_deployment(self, deployment_id):
deployment_object = self.server.store.get_deployment(deployment_id)
if deployment_object is None:
return redirect('/')
content = ''
try:
with open(self.server.config.logdir + '/' + str(deployment_object.id) + '.log') as f:
content = f.read()
except IOError:
content = ''
return render_template('deployment_view.html', deployment=deployment_object, content=content)
def view_deploy(self):
if request.method == 'GET':
return render_template('deploy.html', specs=self.server.store.get_all_specs())
elif request.method == 'POST':
spec = request.form['spec']
commit = request.form['commit']
c = Commit()
c.hash = commit
o = Deployment(status=DeploymentStatus.SCHEDULED,
triggered_time=datetime.datetime.now())
o.spec_id = int(spec)
o.branch = '* Manual *'
o.triggered_commit = c
o.pusher_name = 'admin'
o.pusher_email = '-'
self.server.trigger_deployment(o)
return redirect('/deployments')
def register(self, app):
app.add_url_rule('/deployments', 'view_deployments', self.view_deployments)
app.add_url_rule('/deploy', 'view_deploy', self.view_deploy, methods=['GET', 'POST'])
app.add_url_rule('/deployment/<int:deployment_id>', 'view_deployment', self.view_deployment)
| bsd-3-clause | 6,292,805,605,715,429,000 | 37.631579 | 101 | 0.601272 | false |
nitely/Spirit | spirit/core/utils/ratelimit/ratelimit.py | 1 | 3853 | # -*- coding: utf-8 -*-
import hashlib
import time
from django.core.cache import caches
from ...conf import settings
from ..deprecations import warn
__all__ = ['RateLimit']
TIME_DICT = {
's': 1,
'm': 60}
def validate_cache_config():
try:
cache = settings.CACHES[settings.ST_RATELIMIT_CACHE]
except KeyError:
# Django will raise later when using
# this cache so we do nothing
return
if (not settings.ST_RATELIMIT_SKIP_TIMEOUT_CHECK and
cache.get('TIMEOUT', 1) is not None):
# todo: ConfigurationError in next version
warn(
'settings.ST_RATELIMIT_CACHE cache\'s TIMEOUT '
'must be None (never expire) and it may '
'be other than the default cache. '
'To skip this check, for example when using '
'a third-party backend with no TIMEOUT option, set '
'settings.ST_RATELIMIT_SKIP_TIMEOUT_CHECK to True. '
'This will raise an exception in next version.')
def split_rate(rate):
limit, period = rate.split('/')
limit = int(limit)
if len(period) > 1:
time_ = TIME_DICT[period[-1]]
time_ *= int(period[:-1])
else:
time_ = TIME_DICT[period]
return limit, time_
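# Examples (a sketch):
#   split_rate('5/5m') -> (5, 300)   # 5 hits per 5 minutes
#   split_rate('10/s') -> (10, 1)    # 10 hits per second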
def fixed_window(period):
if settings.ST_TESTS_RATELIMIT_NEVER_EXPIRE:
return 0
if not period: # todo: assert on Spirit 0.5
warn('Period must be greater than 0.')
return time.time() # Closer to no period
timestamp = int(time.time())
return timestamp - timestamp % period
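# Example (a sketch): with period=300, every call inside the same 5-minute
# window returns the same timestamp, so their cache keys collide on purpose:
#   fixed_window(300) == fixed_window(300)  # unless a boundary is crossed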
def make_hash(key):
return (hashlib
.sha1(key.encode('utf-8'))
.hexdigest())
class RateLimit:
def __init__(self, request, uid, methods=None, field=None, rate='5/5m'):
validate_cache_config()
self.request = request
self.uid = uid
self.methods = methods or ['POST']
self.rate = rate
self.limit = None
self.time = None
self.cache_keys = []
if self.request.method in self.methods:
self.limit, self.time = split_rate(rate)
self.cache_keys = self._get_keys(field)
def _make_key(self, key):
key_uid = '%s:%s:%d' % (
self.uid, key, fixed_window(self.time))
return '%s:%s' % (
settings.ST_RATELIMIT_CACHE_PREFIX,
make_hash(key_uid))
def _get_keys(self, field=None):
keys = []
if self.request.user.is_authenticated:
keys.append('user:%d' % self.request.user.pk)
else:
keys.append('ip:%s' % self.request.META['REMOTE_ADDR'])
if field is not None:
field_value = (getattr(self.request, self.request.method)
.get(field, ''))
if field_value:
keys.append('field:%s:%s' % (field, field_value))
return [self._make_key(k) for k in keys]
    def _get_cache_values(self):
        # get_many returns a dict; yield the stored counts, not the cache keys
        return (caches[settings.ST_RATELIMIT_CACHE]
                .get_many(self.cache_keys)
                .values())
def _incr(self, key):
cache = caches[settings.ST_RATELIMIT_CACHE]
cache.add(key, 0)
try:
# This resets the timeout to
# default, see Django ticket #26619
return cache.incr(key)
except ValueError: # Key does not exists
# The cache is being
# pruned too frequently
return 1
def incr(self):
return [self._incr(k) for k in self.cache_keys]
def is_limited(self, increment=True):
if not settings.ST_RATELIMIT_ENABLE:
return False
if increment:
cache_values = self.incr()
else:
cache_values = self._get_cache_values()
return any(
count > self.limit
for count in cache_values)
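# Usage sketch (hedged: a hypothetical Django view; Spirit itself wires this
# class up through its own decorators):
#   def login_view(request):
#       rl = RateLimit(request, 'login', field='username', rate='5/5m')
#       if rl.is_limited():
#           ...  # reject the request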
| mit | -127,130,421,655,364,450 | 25.944056 | 76 | 0.563717 | false |
joytunes/JTLocalize | localization_flow/jtlocalize/core/handle_duplicates_in_localization.py | 1 | 2004 | #!/usr/bin/env python
from localization_utils import *
import argparse
def parse_args():
""" Parses the arguments given in the command line
Returns:
args: The configured arguments will be attributes of the returned object.
"""
parser = argparse.ArgumentParser(description='Omits duplications in a given localizable file.')
parser.add_argument("localizable_file", help="The file that requires duplication handling.")
parser.add_argument("--log_path", default="", help="The log file path")
return parser.parse_args()
def handle_duplications(file_path):
""" Omits the duplications in the strings files.
    Keys that appear more than once will be merged into a single entry, and the omission will be logged.
Args:
file_path (str): The path to the strings file.
"""
logging.info('Handling duplications for "%s"', file_path)
f = open_strings_file(file_path, "r+")
comment_key_value_tuples = extract_comment_key_value_tuples_from_file(f)
file_elements = []
keys_to_objects = {}
duplicates_found = []
for comments, key, value in comment_key_value_tuples:
if key in keys_to_objects:
keys_to_objects[key].add_comments(comments)
duplicates_found.append(key)
else:
loc_obj = LocalizationEntry(comments, key, value)
keys_to_objects[key] = loc_obj
file_elements.append(loc_obj)
# Sort by key
file_elements = sorted(file_elements, key=lambda x: x.key)
f.seek(0)
for element in file_elements:
f.write(unicode(element))
f.write(u"\n")
f.truncate()
f.close()
logging.info("Omitted %d duplicates (%s)" % (len(duplicates_found), ",".join(duplicates_found)))
logging.info('Finished handling duplications for "%s"', file_path)
# The main method for simple command line run.
if __name__ == "__main__":
args = parse_args()
setup_logging(args)
handle_duplications(args.localizable_file)
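# Example invocation (a sketch; the file names are illustrative):
#   python handle_duplicates_in_localization.py Localizable.strings --log_path dedup.log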
| mit | -1,579,286,691,620,859,600 | 29.363636 | 106 | 0.65519 | false |
berkeley-stat159/project-alpha | code/utils/functions/tgrouping.py | 1 | 3121 | from __future__ import absolute_import, division, print_function
from mask_phase_2_dimension_change import neighbor_smoothing
from mask_phase_2_dimension_change import masking_reshape_start, masking_reshape_end
import numpy as np
def t_binary_grouping(t, cutoff, prop = False, abs_on = False):
"""
    Evaluates the t values against a cutoff value or proportion
Parameters
----------
t: t-value of the betas 1d numpy array
cutoff: the limit for the false discovery rate
prop: logical~ if the cutoff is a proportion or a value
abs_on: logical~ if we want to take absolute value of the t input
Returns
-------
zero_one: vector of ones and zeros where ones are above the cutoff, and zeros are below
cutoff: the limit for the false discovery rate
Notes
-----
If you want the values to be preserved multiply t*zero_one afterwards
"""
    # when using a proportion, the cutoff must be a fraction in [0, 1]
assert(0 <= cutoff*prop and cutoff*prop <= 1)
# just to be safe:
t= np.ravel(t)
# if we'd like to take into account abs(t)
if abs_on:
t = np.abs(t)
    # sort the t values
t_sorted = np.sort(t)
if prop:
num = int((1 - cutoff)*t.shape[0])
cutoff = t_sorted[num]
zero_one = np.zeros(t.shape)
zero_one[t >= cutoff] = 1
return zero_one, cutoff
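# Example (a sketch): keep the top half of the t values.
#   >>> t_binary_grouping(np.array([3., 1., 2., 4.]), cutoff=0.5, prop=True)
#   (array([ 1.,  0.,  0.,  1.]), 3.0)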
def t_grouping_neighbor(t_3d, mask, cutoff, neighbors = None,
prop = False, abs_on = False, binary = True, off_value = 0, masked_value = .5):
"""
    Masks a 3d array, applies t_binary_grouping, and optionally applies neighbor smoothing
Parameters
----------
t_3d: t-value of the betas 3d numpy array
mask: a 3d numpy array of 0s and 1s that has the same shape as t_3d
cutoff: the limit for the false discovery rate
neighbors: number of neighbors for neighbor smoothing (must have binary be true)
prop: logical~ if the cutoff is a proportion or a value
abs_on: logical~ if we want to take absolute value of the t input
binary: if binary, then off_value is ignored and 0 is used as the
off_value, 1 as the on value
off_value: the value of those not selected
Returns
-------
output_3d: a 3d numpy array same size as the t_3d with either:
(1) binary on_off values for inside the mask and "masked_value"
for values outside mask or (2) t values to the accepted values,
and "off_values" for lost values, and "masked_value" for values
outside mask. MOREOVER, it can have had neighbor smoothing applied
the binary case
cutoff: the limit for the false discovery rate
"""
if neighbors != None and binary == False:
return False
t_1d = masking_reshape_start(t_3d, mask)
t_1d = np.ravel(t_1d)
zero_one, cutoff = t_binary_grouping(t_1d, cutoff, prop, abs_on)
if not binary:
t_1d = t_1d*zero_one + off_value*(1 - zero_one)
else:
t_1d = zero_one
output_3d = masking_reshape_end(t_1d, mask, masked_value)
if neighbors != None:
output_3d = neighbor_smoothing(output_3d, neighbors)
return output_3d, cutoff
| bsd-3-clause | 1,689,663,783,410,457,000 | 31.175258 | 92 | 0.65588 | false |
ksmit799/Toontown-Source | toontown/golf/DistributedGolfHoleAI.py | 1 | 18607 | from direct.distributed import DistributedObjectAI
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
import DistributedPhysicsWorldAI
from direct.fsm.FSM import FSM
from toontown.ai.ToonBarrier import *
from toontown.golf import GolfGlobals
import random
from toontown.golf import GolfHoleBase
class DistributedGolfHoleAI(DistributedPhysicsWorldAI.DistributedPhysicsWorldAI, FSM, GolfHoleBase.GolfHoleBase):
defaultTransitions = {'Off': ['Cleanup', 'WaitTee'],
'WaitTee': ['WaitSwing',
'Cleanup',
'WaitTee',
'WaitPlayback'],
'WaitSwing': ['WaitPlayback',
'Cleanup',
'WaitSwing',
'WaitTee'],
'WaitPlayback': ['WaitSwing',
'Cleanup',
'WaitTee',
'WaitPlayback'],
'Cleanup': ['Off']}
id = 0
notify = directNotify.newCategory('DistributedGolfHoleAI')
def __init__(self, zoneId, golfCourse, holeId):
FSM.__init__(self, 'Golf_%s_FSM' % self.id)
DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.__init__(self, simbase.air)
GolfHoleBase.GolfHoleBase.__init__(self)
self.zoneId = zoneId
self.golfCourse = golfCourse
self.holeId = holeId
self.avIdList = golfCourse.avIdList[:]
self.watched = [0,
0,
0,
0]
self.barrierPlayback = None
self.trustedPlayerId = None
self.activeGolferIndex = None
self.activeGolferId = None
self.holeInfo = GolfGlobals.HoleInfo[self.holeId]
self.teeChosen = {}
for avId in self.avIdList:
self.teeChosen[avId] = -1
self.ballPos = {}
for avId in self.avIdList:
self.ballPos[avId] = Vec3(0, 0, 0)
self.playStarted = False
return
def curGolfBall(self):
return self.ball
def generate(self):
DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.generate(self)
self.ball = self.createBall()
self.createRays()
if len(self.teePositions) > 1:
startPos = self.teePositions[1]
else:
startPos = self.teePositions[0]
startPos += Vec3(0, 0, GolfGlobals.GOLF_BALL_RADIUS)
self.ball.setPosition(startPos)
def delete(self):
self.notify.debug('__delete__')
DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.delete(self)
self.notify.debug('calling self.terrainModel.removeNode')
self.terrainModel.removeNode()
self.notify.debug('self.barrierPlayback is %s' % self.barrierPlayback)
if self.barrierPlayback:
self.notify.debug('calling self.barrierPlayback.cleanup')
self.barrierPlayback.cleanup()
self.notify.debug('calling self.barrierPlayback = None')
self.barrierPlayback = None
self.activeGolferId = None
return
def setZoneId(self, zoneId):
self.zoneId = zoneId
def setAvatarReadyHole(self):
self.notify.debugStateCall(self)
avId = self.air.getAvatarIdFromSender()
self.golfCourse.avatarReadyHole(avId)
def startPlay(self):
self.notify.debug('startPlay')
self.playStarted = True
self.numGolfers = len(self.golfCourse.getGolferIds())
self.selectNextGolfer()
def selectNextGolfer(self):
self.notify.debug('selectNextGolfer, old golferIndex=%s old golferId=%s' % (self.activeGolferIndex, self.activeGolferId))
if self.golfCourse.isCurHoleDone():
return
if self.activeGolferIndex == None:
self.activeGolferIndex = 0
self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
else:
self.activeGolferIndex += 1
if self.activeGolferIndex >= len(self.golfCourse.getGolferIds()):
self.activeGolferIndex = 0
self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
safety = 0
while safety < 50 and not self.golfCourse.checkGolferPlaying(self.golfCourse.getGolferIds()[self.activeGolferIndex]):
self.activeGolferIndex += 1
self.notify.debug('Index %s' % self.activeGolferIndex)
if self.activeGolferIndex >= len(self.golfCourse.getGolferIds()):
self.activeGolferIndex = 0
self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
safety += 1
if safety != 50:
golferId = self.golfCourse.getGolferIds()[self.activeGolferIndex]
if self.teeChosen[golferId] == -1:
self.sendUpdate('golferChooseTee', [golferId])
self.request('WaitTee')
else:
self.sendUpdate('golfersTurn', [golferId])
self.request('WaitSwing')
else:
self.notify.debug('safety')
self.notify.debug('selectNextGolfer, new golferIndex=%s new golferId=%s' % (self.activeGolferIndex, self.activeGolferId))
return
def clearWatched(self):
        self.watched = [1, 1, 1, 1]
for index in range(len(self.golfCourse.getGolferIds())):
self.watched[index] = 0
def setWatched(self, avId):
for index in range(len(self.golfCourse.getGolferIds())):
if self.golfCourse.getGolferIds()[index] == avId:
self.watched[index] = 1
    def checkWatched(self):
        return 0 not in self.watched
def turnDone(self):
self.notify.debug('Turn Done')
avId = self.air.getAvatarIdFromSender()
if self.barrierPlayback:
self.barrierPlayback.clear(avId)
    def ballInHole(self, golferId=None):
self.notify.debug('ballInHole')
if golferId:
avId = golferId
else:
avId = self.air.getAvatarIdFromSender()
self.golfCourse.setBallIn(avId)
if self.golfCourse.isCurHoleDone():
self.notify.debug('ballInHole doing nothing')
else:
self.notify.debug('ballInHole calling self.selectNextGolfer')
self.selectNextGolfer()
def getHoleId(self):
return self.holeId
def finishHole(self):
self.notify.debug('finishHole')
self.golfCourse.holeOver()
def getGolferIds(self):
return self.avIdList
def loadLevel(self):
GolfHoleBase.GolfHoleBase.loadLevel(self)
optionalObjects = self.terrainModel.findAllMatches('**/optional*')
requiredObjects = self.terrainModel.findAllMatches('**/required*')
self.parseLocators(optionalObjects, 1)
self.parseLocators(requiredObjects, 0)
self.teeNodePath = self.terrainModel.find('**/tee0')
if self.teeNodePath.isEmpty():
teePos = Vec3(0, 0, 10)
else:
teePos = self.teeNodePath.getPos()
teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
self.notify.debug('teeNodePath heading = %s' % self.teeNodePath.getH())
self.teePositions = [teePos]
teeIndex = 1
teeNode = self.terrainModel.find('**/tee%d' % teeIndex)
while not teeNode.isEmpty():
teePos = teeNode.getPos()
teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS)
self.teePositions.append(teePos)
self.notify.debug('teeNodeP heading = %s' % teeNode.getH())
teeIndex += 1
teeNode = self.terrainModel.find('**/tee%d' % teeIndex)
def createLocatorDict(self):
self.locDict = {}
locatorNum = 1
curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum)
while not curNodePath.isEmpty():
self.locDict[locatorNum] = curNodePath
locatorNum += 1
curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum)
def loadBlockers(self):
loadAll = simbase.config.GetBool('golf-all-blockers', 0)
self.createLocatorDict()
self.blockerNums = self.holeInfo['blockers']
for locatorNum in self.locDict:
if locatorNum in self.blockerNums or loadAll:
locator = self.locDict[locatorNum]
locatorParent = locator.getParent()
locator.getChildren().wrtReparentTo(locatorParent)
else:
self.locDict[locatorNum].removeNode()
self.hardSurfaceNodePath.flattenStrong()
def createBall(self):
golfBallGeom = self.createSphere(self.world, self.space, GolfGlobals.GOLF_BALL_DENSITY, GolfGlobals.GOLF_BALL_RADIUS, 1)[1]
return golfBallGeom
def preStep(self):
GolfHoleBase.GolfHoleBase.preStep(self)
def postStep(self):
GolfHoleBase.GolfHoleBase.postStep(self)
def postSwing(self, cycleTime, power, x, y, z, dirX, dirY):
avId = self.air.getAvatarIdFromSender()
        self.storeAction = [avId, cycleTime, power, x, y, z, dirX, dirY]
if self.commonHoldData:
self.doAction()
def postSwingState(self, cycleTime, power, x, y, z, dirX, dirY, curAimTime, commonObjectData):
self.notify.debug('postSwingState')
if not self.golfCourse.getStillPlayingAvIds():
return
avId = self.air.getAvatarIdFromSender()
        self.storeAction = [avId, cycleTime, power, x, y, z, dirX, dirY]
self.commonHoldData = commonObjectData
self.trustedPlayerId = self.choosePlayerToSimulate()
        self.sendUpdateToAvatarId(self.trustedPlayerId, 'assignRecordSwing',
            [avId, cycleTime, power, x, y, z, dirX, dirY, commonObjectData])
self.golfCourse.addAimTime(avId, curAimTime)
def choosePlayerToSimulate(self):
stillPlaying = self.golfCourse.getStillPlayingAvIds()
playerId = 0
if simbase.air.config.GetBool('golf-trust-driver-first', 0):
if stillPlaying:
playerId = stillPlaying[0]
else:
playerId = random.choice(stillPlaying)
return playerId
def ballMovie2AI(self, cycleTime, avId, movie, spinMovie, ballInFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame, commonObjectData):
sentFromId = self.air.getAvatarIdFromSender()
if sentFromId == self.trustedPlayerId:
lastFrameNum = len(movie) - 2
if lastFrameNum < 0:
lastFrameNum = 0
lastFrame = movie[lastFrameNum]
lastPos = Vec3(lastFrame[1], lastFrame[2], lastFrame[3])
self.ballPos[avId] = lastPos
self.golfCourse.incrementScore(avId)
            for playerId in self.golfCourse.getStillPlayingAvIds():
                if playerId != sentFromId:
                    self.sendUpdateToAvatarId(playerId, 'ballMovie2Client',
                        [cycleTime, avId, movie, spinMovie, ballInFrame,
                         ballTouchedHoleFrame, ballFirstTouchedHoleFrame, commonObjectData])
if self.state == 'WaitPlayback' or self.state == 'WaitTee':
self.notify.warning('ballMovie2AI requesting from %s to WaitPlayback' % self.state)
self.request('WaitPlayback')
        elif self.trustedPlayerId is None:
return
else:
self.doAction()
self.trustedPlayerId = None
return
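    # Swing flow (editor's summary, not in the original source): the AI elects one
    # "trusted" client in choosePlayerToSimulate(), asks it to simulate and record the
    # ball physics ('assignRecordSwing'), then relays the recorded movie to every other
    # still-playing client ('ballMovie2Client'). If the trusted client drops before
    # answering (see avatarDropped), the AI replays the held swing itself via doAction().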
def performReadyAction(self):
avId = self.storeAction[0]
if self.state == 'WaitPlayback':
self.notify.debugStateCall(self)
self.notify.debug('ignoring the postSwing for avId=%d since we are in WaitPlayback' % avId)
return
if avId == self.activeGolferId:
self.golfCourse.incrementScore(self.activeGolferId)
else:
            self.notify.warning('activeGolferId %d not equal to sender avId %d' % (self.activeGolferId, avId))
if avId not in self.golfCourse.drivingToons:
position = self.ballPos[avId]
else:
position = Vec3(self.storeAction[3], self.storeAction[4], self.storeAction[5])
self.useCommonObjectData(self.commonHoldData)
newPos = self.trackRecordBodyFlight(self.ball, self.storeAction[1], self.storeAction[2], position, self.storeAction[6], self.storeAction[7])
if self.state == 'WaitPlayback' or self.state == 'WaitTee':
self.notify.warning('performReadyAction requesting from %s to WaitPlayback' % self.state)
self.request('WaitPlayback')
        self.sendUpdate('ballMovie2Client',
            [self.storeAction[1], avId, self.recording, self.aVRecording,
             self.ballInHoleFrame, self.ballTouchedHoleFrame,
             self.ballFirstTouchedHoleFrame, self.commonHoldData])
self.ballPos[avId] = newPos
self.trustedPlayerId = None
return
def postResult(self, cycleTime, avId, recording, aVRecording, ballInHoleFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame):
pass
def enterWaitSwing(self):
pass
def exitWaitSwing(self):
pass
def enterWaitTee(self):
pass
def exitWaitTee(self):
pass
def enterWaitPlayback(self):
self.notify.debug('enterWaitPlayback')
stillPlayingList = self.golfCourse.getStillPlayingAvIds()
self.barrierPlayback = ToonBarrier('waitClientsPlayback', self.uniqueName('waitClientsPlayback'), stillPlayingList, 120, self.handleWaitPlaybackDone, self.handlePlaybackTimeout)
def hasCurGolferReachedMaxSwing(self):
strokes = self.golfCourse.getCurHoleScore(self.activeGolferId)
maxSwing = self.holeInfo['maxSwing']
retval = strokes >= maxSwing
if retval:
av = simbase.air.doId2do.get(self.activeGolferId)
if av:
if av.getUnlimitedSwing():
retval = False
return retval
def handleWaitPlaybackDone(self):
if self.isCurBallInHole(self.activeGolferId) or self.hasCurGolferReachedMaxSwing():
if self.activeGolferId:
self.ballInHole(self.activeGolferId)
else:
self.selectNextGolfer()
def isCurBallInHole(self, golferId):
retval = False
for holePos in self.holePositions:
displacement = self.ballPos[golferId] - holePos
length = displacement.length()
self.notify.debug('hole %s length=%s' % (holePos, length))
if length <= GolfGlobals.DistanceToBeInHole:
retval = True
break
return retval
def exitWaitPlayback(self):
self.notify.debug('exitWaitPlayback')
if hasattr(self, 'barrierPlayback') and self.barrierPlayback:
self.barrierPlayback.cleanup()
self.barrierPlayback = None
return
def enterCleanup(self):
pass
def exitCleanup(self):
pass
    def handlePlaybackTimeout(self, task=None):
self.notify.debug('handlePlaybackTimeout')
self.handleWaitPlaybackDone()
def getGolfCourseDoId(self):
return self.golfCourse.doId
def avatarDropped(self, avId):
self.notify.warning('avId %d dropped, self.state=%s' % (avId, self.state))
if self.barrierPlayback:
self.barrierPlayback.clear(avId)
else:
if avId == self.trustedPlayerId:
self.doAction()
if avId == self.activeGolferId and not self.golfCourse.haveAllGolfersExited():
self.selectNextGolfer()
def setAvatarTee(self, chosenTee):
golferId = self.air.getAvatarIdFromSender()
self.teeChosen[golferId] = chosenTee
self.ballPos[golferId] = self.teePositions[chosenTee]
self.sendUpdate('setAvatarFinalTee', [golferId, chosenTee])
self.sendUpdate('golfersTurn', [golferId])
self.request('WaitSwing')
def setBox(self, pos0, pos1, pos2, quat0, quat1, quat2, quat3, anV0, anV1, anV2, lnV0, lnV1, lnV2):
        self.sendUpdate('sendBox', [pos0, pos1, pos2, quat0, quat1, quat2, quat3,
                                    anV0, anV1, anV2, lnV0, lnV1, lnV2])
    def parseLocators(self, objectCollection, optional=0):
        if optional and objectCollection.getNumPaths():
            # Only fill the optional movers that this hole actually uses.
            if 'optionalMovers' in self.holeInfo:
                for optionalMoverId in self.holeInfo['optionalMovers']:
                    searchStr = 'optional_mover_' + str(optionalMoverId)
                    for objIndex in range(objectCollection.getNumPaths()):
                        obj = objectCollection.getPath(objIndex)
                        if searchStr in obj.getName():
                            self.fillLocator(objectCollection, objIndex)
                            break
        else:
            for index in range(objectCollection.getNumPaths()):
                self.fillLocator(objectCollection, index)
    def fillLocator(self, objectCollection, index):
        path = objectCollection[index]
        pathName = path.getName()
        pathArray = pathName.split('_')
        sizeX = None
        sizeY = None
        move = None
        objType = None
        # Locator names encode their parameters in underscore-separated parts:
        # an 'X' or 'Y' prefix carries a size, 'd' carries the move distance,
        # and 'p' stands in for the decimal point.
        for subString in pathArray:
            if subString[:1] == 'X':
                dataString = subString[1:]
                dataString = dataString.replace('p', '.')
                sizeX = float(dataString)
            elif subString[:1] == 'Y':
                dataString = subString[1:]
                dataString = dataString.replace('p', '.')
                sizeY = float(dataString)
            elif subString[:1] == 'd':
                dataString = subString[1:]
                dataString = dataString.replace('p', '.')
                move = float(dataString)
            elif subString == 'mover':
                objType = 4
            elif subString == 'windmillLocator':
                objType = 3
        if objType == 4 and move and sizeX and sizeY:
            self.createCommonObject(4, path.getPos(), path.getHpr(), sizeX, sizeY, move)
        elif objType == 3:
            self.createCommonObject(3, path.getPos(), path.getHpr())
        return
| mit | 5,974,727,302,163,878,000 | 36.363454 | 185 | 0.604342 | false |
FabriceSalvaire/monitor-server | MonitorServer/Tools/Singleton.py | 1 | 4601 | ####################################################################################################
#
# MonitorServer - A Server Monitoring Application
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
"""
Singleton snippets.
"""
####################################################################################################
from __future__ import print_function
####################################################################################################
import threading
####################################################################################################
class SingletonMetaClass(type):
""" A singleton metaclass.
This implementation supports subclassing and is thread safe.
"""
##############################################
def __init__(cls, class_name, super_classes, class_attribute_dict):
# It is called just after cls creation in order to complete cls.
# print('MetaSingleton __init__:', cls, class_name, super_classes, class_attribute_dict, sep='\n... ')
type.__init__(cls, class_name, super_classes, class_attribute_dict)
cls._instance = None
cls._rlock = threading.RLock() # A factory function that returns a new reentrant lock object.
##############################################
def __call__(cls, *args, **kwargs):
# It is called when cls is instantiated: cls(...).
# type.__call__ dispatches to the cls.__new__ and cls.__init__ methods.
# print('MetaSingleton __call__:', cls, args, kwargs, sep='\n... ')
with cls._rlock:
if cls._instance is None:
cls._instance = type.__call__(cls, *args, **kwargs)
return cls._instance
####################################################################################################
class singleton(object):
""" A singleton class decorator.
This implementation doesn't support subclassing.
"""
##############################################
def __init__(self, cls):
# print('singleton __init__: On @ decoration', cls, sep='\n... ')
self._cls = cls
self._instance = None
##############################################
def __call__(self, *args, **kwargs):
# print('singleton __call__: On instance creation', self, args, kwargs, sep='\n... ')
if self._instance is None:
self._instance = self._cls(*args, **kwargs)
return self._instance
####################################################################################################
def singleton_func(cls):
""" A singleton function decorator.
This implementation doesn't support subclassing.
"""
# print('singleton_func: On @ decoration', cls, sep='\n... ')
instances = {}
def get_instance(*args, **kwargs):
# print('singleton_func: On instance creation', cls, sep='\n... ')
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return get_instance
####################################################################################################
class monostate(object):
""" A monostate base class.
"""
_shared_state = {}
##############################################
def __new__(cls, *args, **kwargs):
# print('monostate __new__:', cls, args, kwargs, sep='\n... ')
obj = super(monostate, cls).__new__(cls, *args, **kwargs)
obj.__dict__ = cls._shared_state
return obj
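####################################################################################################
# Editor's addition: a minimal usage sketch of the variants above, guarded so that importing the
# module stays side-effect free. The class names (Config, Cache, Registry, Settings) are
# hypothetical examples, not part of the original module.
if __name__ == '__main__':
    class Config(object):
        # SingletonMetaClass: thread safe and supports subclassing (Python 2 syntax).
        __metaclass__ = SingletonMetaClass
    @singleton
    class Cache(object):
        # Class-decorator form: no subclassing support.
        pass
    @singleton_func
    class Registry(object):
        # Function-decorator form: instances cached per class.
        pass
    class Settings(monostate):
        # Monostate: many instances, one shared state dict.
        pass
    print(Config() is Config())      # True: always the same instance
    print(Cache() is Cache())        # True
    print(Registry() is Registry())  # True
    s1, s2 = Settings(), Settings()
    s1.debug = True
    print(s2.debug)                  # True: state is shared although s1 is not s2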
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 | -2,197,945,327,112,230,400 | 30.513699 | 110 | 0.433601 | false |
RyanChinSang/ECNG3020-ORSS4SCVI | BETA/TestCode/SpchRecg/APP-SpchRecg3.py | 1 | 1382 | import os
import pocketsphinx as ps
modeldir = "C:/Python36-64/Lib/site-packages/pocketsphinx/model/"
# datadir = "C:/Python36-64/Lib/site-packages/pocketsphinx/data/"
# Create a decoder with certain model
config = ps.Decoder.default_config()
config.set_string('-hmm', os.path.join(modeldir, 'en-us'))
config.set_string('-lm', os.path.join(modeldir, 'en-us.lm.bin'))
config.set_string('-dict', os.path.join(modeldir, 'cmudict-en-us.dict'))
# config.set_string('-kws', 'command.list')
# Open file to read the data
# stream = open(os.path.join(datadir, "goforward.raw"), "rb")
# Alternatively you can read from microphone
import pyaudio
#
#
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)
stream.start_stream()
# Process audio chunk by chunk. On keyword detected perform action and restart search
decoder = ps.Decoder(config)
# decoder = Decoder()
decoder.start_utt()
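# Editor's note: with 16 kHz, mono, 16-bit audio, each 1024-frame read below is
# roughly 64 ms of sound, so hypotheses are refreshed with low latency.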
while True:
buf = stream.read(1024)
if buf:
decoder.process_raw(buf, False, False)
else:
break
if decoder.hyp() is not None:
print(decoder.hyp().hypstr)
# print([(seg.word, seg.prob, seg.start_frame, seg.end_frame) for seg in decoder.seg()])
# print("Detected keyword, restarting search")
decoder.end_utt()
decoder.start_utt()
# print(decoder.hyp().hypstr)
| gpl-3.0 | 2,085,290,420,487,328,000 | 32.707317 | 99 | 0.68958 | false |
Kocal/django-tornado-websockets | tornado_websockets/modules/progressbar.py | 1 | 3746 | # coding=utf-8
from tornado_websockets.modules.module import Module
class ProgressBar(Module):
"""
Initialize a new ProgressBar module instance.
If ``min`` and ``max`` values are equal, this progress bar has its indeterminate state
set to ``True``.
:param min: Minimum value
:param max: Maximum value
:type min: int
:type max: int
"""
    def __init__(self, name='', min=0, max=100, indeterminate=False):
        if name:
            name = '_' + name
        super(ProgressBar, self).__init__('progressbar' + name)
        if max < min:
            raise ValueError('Param « min » can not be greater than param « max ».')
        self.min = self.current = min
        self.max = max
        # Matches the class docstring: equal bounds make the progress bar indeterminate.
        self.indeterminate = indeterminate or min == max
def initialize(self):
@self.on
def open():
self.emit_init()
def tick(self, label=None):
"""
        Increments the progress bar's ``current`` value by ``1`` and emits an ``update`` event. Also emits a ``done`` event if
progression is done.
Call :meth:`~tornado_websockets.modules.progress_bar.ProgressBar.emit_update` method each time this
method is called.
Call :meth:`~tornado_websockets.modules.progress_bar.ProgressBar.emit_done` method if progression is
done.
:param label: A label which can be displayed on the client screen
:type label: str
"""
if not self.indeterminate and self.current < self.max:
self.current += 1
self.emit_update(label)
if self.is_done():
self.emit_done()
def reset(self):
"""
Reset progress bar's progression to its minimum value.
"""
self.current = self.min
def is_done(self):
"""
Return ``True`` if progress bar's progression is done, otherwise ``False``.
Returns ``False`` if progress bar is indeterminate, returns ``True`` if progress bar is
determinate and current value is equals to ``max`` value.
Returns ``False`` by default.
:rtype: bool
"""
if self.indeterminate:
return False
        if self.current == self.max:
return True
return False
def emit_init(self):
"""
Emit ``before_init``, ``init`` and ``after_init`` events to initialize a client-side progress bar.
If progress bar is not indeterminate, ``min``, ``max`` and ``value`` values are sent with ``init`` event.
"""
data = {'indeterminate': self.indeterminate}
if not self.indeterminate:
data.update({
'min': int(self.min),
'max': int(self.max),
'current': int(self.current),
})
self.emit('before_init')
self.emit('init', data)
self.emit('after_init')
def emit_update(self, label=None):
"""
Emit ``before_update``, ``update`` and ``after_update`` events to update a client-side progress bar.
:param label: A label which can be displayed on the client screen
:type label: str
"""
data = {}
if not self.indeterminate:
data.update({'current': self.current})
if label:
data.update({'label': label})
self.emit('before_update')
self.emit('update', data)
self.emit('after_update')
def emit_done(self):
"""
Emit ``done`` event when progress bar's progression
:meth:`~tornado_websockets.modules.progress_bar.ProgressBar.is_done`.
"""
self.emit('done')
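# Editor's addition: a minimal usage sketch. How a module instance is attached to a running
# websocket is application wiring that lives outside this file, so ``websocket.bind(...)``
# below is a hypothetical placeholder, not an API of this package.
#
#     progress = ProgressBar('export', min=0, max=100)
#     websocket.bind(progress)                 # hypothetical registration step
#     for i in range(100):
#         do_work(i)                           # hypothetical work function
#         progress.tick(label='step %d' % i)   # emits 'update', then 'done' at 100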
| gpl-3.0 | 7,786,795,235,155,894,000 | 28.234375 | 117 | 0.552378 | false |
zhouzhaoze/dip | project3/Proj04-04/main.py | 1 | 2549 | #!/bin/python
# -*- coding: utf-8 -*-
'''
Image Priting Program Based on Haftoning
'''
import sys
import numpy, scipy
from scipy import ndimage
from scipy import misc
import scipy.fftpack as fftpack
import matplotlib.pyplot as plt
sys.path.append('../Proj04-01')
from DFT import DFT_2D, IDFT_2D
def en_padding(img):
M, N = img.shape
P, Q = 2 * M, 2 * N
_img = numpy.zeros(P*Q).reshape((P, Q))
for x in range(M):
for y in range(N):
_img[x][y] = img[x][y]
return _img
def de_padding(img):
P, Q = img.shape
M, N = P/2, Q/2
_img = numpy.zeros(M*N).reshape((M, N))
for x in range(M):
for y in range(N):
_img[x][y] = img[x][y]
return _img
def shift(img):
M, N = img.shape
_img = img.copy()
for x in range(M):
for y in range(N):
_img[x][y] = img[x][y] * numpy.power(-1.0, (x+y))
return _img
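# Editor's note (addition): multiplying f(x, y) by (-1)^(x + y) before the DFT is
# equivalent to translating the spectrum by (P/2, Q/2), so F(0, 0) ends up at the
# image center, which is what the centered Gaussian masks below assume.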
def sqdistance(p1, p2):
return ((p1[0]-p2[0])*(p1[0]-p2[0])) + \
((p1[1]-p2[1])*(p1[1]-p2[1]))
def lowpass_mask(P, Q, cuf_off_frequency):
center = (P/2.0, Q/2.0)
mask = numpy.zeros(P * Q).reshape(P, Q)
for u in range(P):
for v in range(Q):
mask[u][v] = numpy.exp(-1*sqdistance(center, (u, v)) / (2*(cuf_off_frequency*cuf_off_frequency)))
return mask
def highpass_mask(P, Q, cuf_off_frequency):
return 1.0 - lowpass_mask(P, Q, cuf_off_frequency)
# center = (P/2.0, Q/2.0)
# mask = numpy.zeros(P * Q).reshape(P, Q)
# for u in range(P):
# for v in range(Q):
# mask[u][v] = 1.0-numpy.exp(-1*sqdistance(center, (u, v)) / (2*(cuf_off_frequency*cuf_off_frequency)))
# return mask
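# Editor's note (addition): the masks above implement the Gaussian transfer functions
#   H_lp(u, v) = exp(-D^2(u, v) / (2 * D0^2))    and    H_hp(u, v) = 1 - H_lp(u, v),
# where D(u, v) is the distance to the spectrum center (P/2, Q/2) and D0 is the
# cut-off frequency. Quick sanity check:
#   m = highpass_mask(64, 64, 10)
#   assert m[32][32] == 0.0 and m[0][0] > 0.99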
def main():
img_file = 'Fig0441(a)(characters_test_pattern).tif'
img = misc.imread(img_file)
padding_img = en_padding(img)
padding_img = shift(padding_img)
dft_img = DFT_2D(padding_img)
for cut_off_frequency in [30, 60, 160]:
print cut_off_frequency
hp_mask = highpass_mask(dft_img.shape[0], dft_img.shape[1], cut_off_frequency)
misc.imsave('%s_hpmask_%d.tif' % (img_file, cut_off_frequency), 255 * hp_mask)
hp_img = numpy.multiply(dft_img, hp_mask)
misc.imsave('%s_fft_%d.tif' % (img_file, cut_off_frequency), numpy.log(1+numpy.abs(hp_img)))
hp_idtft_img = shift(IDFT_2D(hp_img).real)
hp_idtft_img = de_padding(hp_idtft_img)
print hp_idtft_img.shape
misc.imsave('%s_hp_%d.tif' % (img_file, cut_off_frequency), hp_idtft_img)
if __name__ == '__main__':
main()
| apache-2.0 | -3,213,598,260,219,212,000 | 27.322222 | 114 | 0.567674 | false |
solarpermit/solarpermit | website/migrations/0086_auto__del_actiontutorial__del_person__del_userreward__del_applicationh.py | 1 | 68516 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'ActionTutorial'
db.delete_table('website_actiontutorial')
# Deleting model 'Person'
db.delete_table('website_person')
# Deleting model 'UserReward'
db.delete_table('website_userreward')
# Deleting model 'ApplicationHistory'
db.delete_table('website_applicationhistory')
# Deleting model 'Document'
db.delete_table('website_document')
# Deleting model 'UserTutorialHistory'
db.delete_table('website_usertutorialhistory')
# Deleting model 'Tutorial'
db.delete_table('website_tutorial')
# Deleting model 'DocumentCategory'
db.delete_table('website_documentcategory')
# Deleting model 'QuestionDependency'
db.delete_table('website_questiondependency')
# Deleting model 'ApplicationAnswer'
db.delete_table('website_applicationanswer')
# Deleting model 'PersonAddress'
db.delete_table('website_personaddress')
# Deleting model 'Application'
db.delete_table('website_application')
# Deleting model 'Region'
db.delete_table('website_region')
# Deleting model 'UserTutorialPageHistory'
db.delete_table('website_usertutorialpagehistory')
# Deleting model 'TutorialPage'
db.delete_table('website_tutorialpage')
# Deleting field 'Jurisdiction.region'
db.delete_column('website_jurisdiction', 'region_id')
# Deleting field 'OrganizationAddress.address_type'
db.delete_column('website_organizationaddress', 'address_type')
# Deleting field 'OrganizationMember.person'
db.delete_column('website_organizationmember', 'person_id')
def backwards(self, orm):
# Adding model 'ActionTutorial'
db.create_table('website_actiontutorial', (
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('action_identifier', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
))
db.send_create_signal('website', ['ActionTutorial'])
# Adding model 'Person'
db.create_table('website_person', (
('phone_primary', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('last_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('first_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('phone_secondary', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('phone_mobile', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
))
db.send_create_signal('website', ['Person'])
# Adding model 'UserReward'
db.create_table('website_userreward', (
('reward_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('reward', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.RewardCategory'], null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('website', ['UserReward'])
# Adding model 'ApplicationHistory'
db.create_table('website_applicationhistory', (
('status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Application'])),
('status_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('status_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['ApplicationHistory'])
# Adding model 'Document'
db.create_table('website_document', (
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('file_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('jurisdiction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('reviewed', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('accepted', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(blank=True, max_length=128, null=True, db_index=True)),
('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Region'], null=True, blank=True)),
('file_path', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal('website', ['Document'])
# Adding model 'UserTutorialHistory'
db.create_table('website_usertutorialhistory', (
('view_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('user_email', self.gf('django.db.models.fields.EmailField')(blank=True, max_length=75, null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
))
db.send_create_signal('website', ['UserTutorialHistory'])
# Adding model 'Tutorial'
db.create_table('website_tutorial', (
('start_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('identifier', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=128, null=True, db_index=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('end_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('website', ['Tutorial'])
# Adding model 'DocumentCategory'
db.create_table('website_documentcategory', (
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('website', ['DocumentCategory'])
# Adding model 'QuestionDependency'
db.create_table('website_questiondependency', (
('required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('answer_text', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('question2', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_questionDependency_question2', to=orm['website.Question'])),
('question1', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_questionDependency_question1', to=orm['website.Question'])),
('strength', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['QuestionDependency'])
# Adding model 'ApplicationAnswer'
db.create_table('website_applicationanswer', (
('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Application'])),
('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Template'])),
('file_upload', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Question'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['ApplicationAnswer'])
# Adding model 'PersonAddress'
db.create_table('website_personaddress', (
('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Person'], null=True, blank=True)),
('address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Address'], null=True, blank=True)),
('display_order', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
('address_type', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['PersonAddress'])
# Adding model 'Application'
db.create_table('website_application', (
('jurisdiction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True)),
('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Template'], null=True, blank=True)),
('address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Address'], null=True, blank=True)),
('applicant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('current_status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['Application'])
# Adding model 'Region'
db.create_table('website_region', (
('state', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['Region'])
# Adding model 'UserTutorialPageHistory'
db.create_table('website_usertutorialpagehistory', (
('checked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
('user_email', self.gf('django.db.models.fields.EmailField')(blank=True, max_length=75, null=True, db_index=True)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.TutorialPage'], null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['UserTutorialPageHistory'])
# Adding model 'TutorialPage'
db.create_table('website_tutorialpage', (
('selector', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('display_order', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('tip', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
))
db.send_create_signal('website', ['TutorialPage'])
# Adding field 'Jurisdiction.region'
db.add_column('website_jurisdiction', 'region',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Region'], null=True, blank=True),
keep_default=False)
# Adding field 'OrganizationAddress.address_type'
db.add_column('website_organizationaddress', 'address_type',
self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True),
keep_default=False)
# Adding field 'OrganizationMember.person'
db.add_column('website_organizationmember', 'person',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Person'], null=True, blank=True),
keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.action': {
'Meta': {'object_name': 'Action'},
'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.actioncategory': {
'Meta': {'object_name': 'ActionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.address': {
'Meta': {'object_name': 'Address'},
'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'website.answerchoice': {
'Meta': {'object_name': 'AnswerChoice'},
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'website.answerchoicegroup': {
'Meta': {'object_name': 'AnswerChoiceGroup'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'website.answerreference': {
'Meta': {'object_name': 'AnswerReference'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.applicability': {
'Meta': {'object_name': 'Applicability'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.comment': {
'Meta': {'object_name': 'Comment'},
'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityview': {
'Meta': {'object_name': 'EntityView'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.entityviewcount': {
'Meta': {'object_name': 'EntityViewCount'},
'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.jurisdiction': {
'Meta': {'object_name': 'Jurisdiction'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.jurisdictioncontributor': {
'Meta': {'object_name': 'JurisdictionContributor'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.migrationhistory': {
'Meta': {'object_name': 'MigrationHistory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'website.organizationaddress': {
'Meta': {'object_name': 'OrganizationAddress'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
},
'website.organizationcategory': {
'Meta': {'object_name': 'OrganizationCategory'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.organizationmember': {
'Meta': {'object_name': 'OrganizationMember'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
},
'website.organizationrating': {
'Meta': {'object_name': 'OrganizationRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'website.question': {
'Meta': {'object_name': 'Question'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.questioncategory': {
'Meta': {'object_name': 'QuestionCategory'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'website.ratingcategory': {
'Meta': {'object_name': 'RatingCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
'website.ratinglevel': {
'Meta': {'object_name': 'RatingLevel'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'website.reaction': {
'Meta': {'object_name': 'Reaction'},
'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.reactioncategory': {
'Meta': {'object_name': 'ReactionCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
},
'website.rewardcategory': {
'Meta': {'object_name': 'RewardCategory'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.roletype': {
'Meta': {'object_name': 'RoleType'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'website.servervariable': {
'Meta': {'object_name': 'ServerVariable'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'website.template': {
'Meta': {'object_name': 'Template'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.templatequestion': {
'Meta': {'object_name': 'TemplateQuestion'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
},
'website.usercommentview': {
'Meta': {'object_name': 'UserCommentView'},
'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'website.userdetail': {
'Meta': {'object_name': 'UserDetail'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userfavorite': {
'Meta': {'object_name': 'UserFavorite'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userpageview': {
'Meta': {'object_name': 'UserPageView'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.userrating': {
'Meta': {'object_name': 'UserRating'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.usersearch': {
'Meta': {'object_name': 'UserSearch'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'website.view': {
'Meta': {'object_name': 'View'},
'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'view_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
},
'website.vieworgs': {
'Meta': {'object_name': 'ViewOrgs'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.viewquestions': {
'Meta': {'object_name': 'ViewQuestions'},
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
},
'website.zipcode': {
'Meta': {'object_name': 'Zipcode'},
'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
}
}
complete_apps = ['website'] | bsd-3-clause | 3,161,687,375,410,409,000 | 87.983117 | 200 | 0.573939 | false |
Upande/MaMaSe | apps/event/migrations/0001_initial.py | 1 | 3706 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import wagtail.wagtailcore.fields
from django.conf import settings
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
]
operations = [
migrations.CreateModel(
name='Attendee',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('tickets', models.IntegerField(default=1)),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='EventIndexPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventIndexRelatedLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
('link_external', models.URLField(verbose_name=b'External link', blank=True)),
('title', models.CharField(help_text=b'Link title', max_length=255)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('start_date', models.DateTimeField(verbose_name=b'Start date')),
('end_date', models.DateTimeField(verbose_name=b'End date')),
('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('location', wagtail.wagtailcore.fields.RichTextField(max_length=200)),
('maps_url', models.URLField(verbose_name=b'Map Link', blank=True)),
('cost', models.IntegerField(default=0)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageTag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('content_object', modelcluster.fields.ParentalKey(related_name='tagged_items', to='event.EventPage')),
('tag', models.ForeignKey(related_name='event_eventpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='watchlist',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('date', models.DateTimeField(auto_now_add=True)),
('event', models.ForeignKey(to='event.EventPage')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| apache-2.0 | 5,118,050,073,958,929,000 | 41.113636 | 146 | 0.552887 | false |
marcoscastro/facebook_profile_photo | gui.py | 1 | 1243 | from tkinter import *
from face_photo import *
from PIL import Image, ImageTk
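# Hedged note (added for clarity): face_photo.get_photo is assumed to fetch
# the public profile picture for a given ID (e.g. via the Graph API endpoint
# https://graph.facebook.com/<ID>/picture), save it locally as '<ID>.png',
# and return True on success -- the click handler below relies on exactly
# that contract.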
window = Tk()  # create a window
window.title('Facebook profile photo')  # set the window title
window.geometry('450x300')  # set the window size
entry = Entry(window, width=25, justify='center')  # create a text entry
entry.insert(0, 'Enter the Facebook ID')  # set the placeholder text
entry.pack()  # geometry manager
entry.focus_set()  # give focus to the text entry
old_label_image = None
# handler for the button click event
def click_button():
    global old_label_image
    ID = entry.get()  # get the typed text
    if not ID:  # check whether the text is empty
        entry.insert(0, 'Enter the Facebook ID')
    else:
        if get_photo(ID):
            # load the image
            img = ImageTk.PhotoImage(Image.open(ID + '.png'))
            # create a panel to hold the image
            label_image = Label(window, image=img)
            label_image.pack_forget()
            label_image.image = img
            label_image.pack()
            if old_label_image is not None:
                old_label_image.destroy()
            old_label_image = label_image
        else:
            # nothing to show: the photo could not be retrieved
            pass
# create a button
btn = Button(window, text='Show photo', width=20, command=click_button)
btn.pack()
# main application loop
window.mainloop() | mit | 2,414,084,095,627,359,000 | 26.422222 | 77 | 0.70884 | false |
oVirt/ovirt-scheduler-proxy | src/ovirtscheduler/oschedproxyd.py | 1 | 3539 | #
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import socketserver
import sys
from logging.handlers import RotatingFileHandler
from time import strftime
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
from ovirtscheduler import API
from ovirtscheduler.request_handler import RequestHandler
class SimpleThreadedXMLRPCServer(socketserver.ThreadingMixIn,
SimpleXMLRPCServer):
pass
class XMLPRPCRequestHandler(SimpleXMLRPCRequestHandler):
if sys.version_info[:2] == (2, 6):
# Override BaseHTTPServer.BaseRequestHandler implementation to avoid
# pointless and slow attempt to get the fully qualified host name from
# the client address. This method is not used any more in Python 2.7.
def address_string(self):
return self.client_address[0]
def setup_logging(path):
file_handler = RotatingFileHandler(path,
maxBytes=50*1024,
backupCount=6)
log_formatter = logging.Formatter('%(asctime)s %(levelname)-8s'
' [process:%(processName)s,'
' thread:%(threadName)s] '
'%(message)s',
'%a, %d %b %Y %H:%M:%S')
file_handler.setFormatter(log_formatter)
logger = logging.getLogger()
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
class ProxyServer(object):
def __init__(self, plugin_path=None):
self._server = None
self._handler = None
if plugin_path is None:
self._plugin_path = os.path.join(os.getcwd(), "plugins")
else:
self._plugin_path = plugin_path
def setup(self):
logging.info("Setting up server")
self._server = SimpleThreadedXMLRPCServer(
("localhost", 18781),
allow_none=True,
requestHandler=XMLPRPCRequestHandler)
analyzer_path = os.path.dirname(__file__)
logging.info("Loading modules from %s" % self._plugin_path)
logging.info("Loading analyzer from %s" % analyzer_path)
self._handler = RequestHandler(
self._plugin_path,
analyzer_path)
def run(self):
logging.info("Publishing API")
self._server.register_introspection_functions()
self._server.register_instance(API.API(self._handler))
self._server.serve_forever()
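# Hedged usage sketch (illustrative addition, not part of the original
# daemon): any XML-RPC client can talk to the server published above on
# localhost:18781. Only the introspection surface is assumed here, because
# run() registers it explicitly; the scheduling methods themselves are
# whatever ovirtscheduler.API exposes.
def example_client_call():
    from xmlrpc.client import ServerProxy
    proxy = ServerProxy("http://localhost:18781")
    # Returns the list of callable method names, including those of API.API.
    return proxy.system.listMethods()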
# for test runs
def main():
server = ProxyServer(os.environ.get("OSCHEDPROXY_PLUGINS", None))
server.setup()
server.run()
if __name__ == "__main__":
log_filename = '/var/log/ovirt-scheduler-proxy/ovirt-scheduler-proxy.log'
try:
setup_logging(log_filename)
except IOError:
log_filename = './ovirt-scheduler-proxy.' \
+ strftime("%Y%m%d_%H%M%S") + '.log'
setup_logging(log_filename)
main()
| apache-2.0 | 651,295,214,918,901,600 | 32.386792 | 78 | 0.627578 | false |
imapp-pl/golem | golem/ethereum/client.py | 1 | 2118 | import logging
import rlp
from eth_rpc_client import Client as EthereumRpcClient
from .node import NodeProcess
log = logging.getLogger('golem.ethereum')
class Client(EthereumRpcClient):
""" RPC interface client for Ethereum node."""
STATIC_NODES = ["enode://f1fbbeff7e9777a3a930f1e55a5486476845f799f7d603f71be7b00898df98f2dc2e81b854d2c774c3d266f1fa105d130d4a43bc58e700155c4565726ae6804e@94.23.17.170:30900"] # noqa
node = None
def __init__(self, datadir, nodes=None):
if not nodes:
nodes = Client.STATIC_NODES
if not Client.node:
Client.node = NodeProcess(nodes, datadir)
else:
assert Client.node.datadir == datadir, \
"Ethereum node's datadir cannot be changed"
if not Client.node.is_running():
Client.node.start(rpc=True)
super(Client, self).__init__(port=Client.node.rpcport)
@staticmethod
def _kill_node():
# FIXME: Keeping the node as a static object might not be the best.
if Client.node:
Client.node.stop()
Client.node = None
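    # Hedged usage sketch (added): with the geth node managed by NodeProcess
    # running, typical read-only calls look like
    #   client = Client(datadir)
    #   if not client.is_syncing():
    #       peers = client.get_peer_count()
    # using only the methods defined below.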
def get_peer_count(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#net_peerCount
"""
response = self.make_request("net_peerCount", [])
return int(response['result'], 16)
def is_syncing(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_syncing
"""
response = self.make_request("eth_syncing", [])
result = response['result']
return bool(result)
def get_transaction_count(self, address):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactioncount
"""
response = self.make_request("eth_getTransactionCount", [address, "pending"])
return int(response['result'], 16)
def send_raw_transaction(self, data):
response = self.make_request("eth_sendRawTransaction", [data])
return response['result']
def send(self, transaction):
return self.send_raw_transaction(rlp.encode(transaction).encode('hex'))
| gpl-3.0 | 3,904,709,206,332,040,700 | 32.09375 | 186 | 0.635033 | false |
vincent-noel/libSigNetSim | libsignetsim/model/tests/TestAnnotation.py | 1 | 5911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel ([email protected])
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
Testing the reading/writing of MIRIAM annotations of the model
"""
from libsignetsim import SbmlDocument, Settings
from libsignetsim.uris.URI import URI
from unittest import TestCase
from os.path import join, dirname, isdir
from os import mkdir, getcwd
class TestAnnotation(TestCase):
""" Tests high level functions """
def testReadWriteModel(self):
testfiles_path = join(join(getcwd(), dirname(__file__)), "files")
sbml_doc = SbmlDocument()
sbml_doc.readSbmlFromFile(join(testfiles_path, "BIOMD0000000001.xml"))
		sbml_doc.writeSbmlToFile(join(Settings.tempDirectory, "BIOMD0000000001.xml"))
		# Read back the file just written, so the write path is actually exercised.
		sbml_doc.readSbmlFromFile(join(Settings.tempDirectory, "BIOMD0000000001.xml"))
sbml_model = sbml_doc.getModelInstance()
self.assertEqual(sbml_model.modelHistory.getDateCreated(), "2005-02-02T14:56:11Z")
creator = sbml_model.modelHistory.getListOfCreators()[0]
self.assertEqual(creator.getEmail(), "[email protected]")
self.assertEqual(creator.getGivenName(), "Nicolas")
self.assertEqual(creator.getFamilyName(), "Le Novère")
taxon_uri = URI()
taxon_uri.setTaxonomy('7787')
self.assertEqual(sbml_model.getAnnotation().getHasTaxon()[0], taxon_uri)
self.assertEqual(sbml_model.getAnnotation().getHasTaxon()[0].getName(), "Tetronarce californica")
self.assertEqual(sbml_model.getAnnotation().getHasProperty(), [])
self.assertEqual(sbml_model.getAnnotation().getHasPart(), [])
self.assertEqual(sbml_model.getAnnotation().getHasVersion(), [])
self.assertEqual(sbml_model.getAnnotation().getIs(), [])
self.assertEqual(sbml_model.getAnnotation().getIsDescribedBy(), [])
self.assertEqual(sbml_model.getAnnotation().getIsEncodedBy(), [])
self.assertEqual(sbml_model.getAnnotation().getIsHomologTo(), [])
self.assertEqual(sbml_model.getAnnotation().getIsPartOf(), [])
self.assertEqual(sbml_model.getAnnotation().getIsPropertyOf(), [])
go_process1_uri = URI()
go_process1_uri.setGO('GO:0007274')
go_process2_uri = URI()
go_process2_uri.setGO('GO:0007166')
go_process3_uri = URI()
go_process3_uri.setGO('GO:0019226')
self.assertEqual(sbml_model.getAnnotation().getIsVersionOf()[0], go_process1_uri)
self.assertEqual(sbml_model.getAnnotation().getIsVersionOf()[1], go_process2_uri)
self.assertEqual(sbml_model.getAnnotation().getIsVersionOf()[2], go_process3_uri)
self.assertEqual(sbml_model.getAnnotation().getOccursIn(), [])
self.assertEqual(sbml_model.getAnnotation().getUnknown(), [])
self.assertEqual(sbml_model.getAnnotation().getModelHasInstance(), [])
biomodels_ref1_uri = URI()
biomodels_ref1_uri.setBiomodels('MODEL6613849442')
biomodels_ref2_uri = URI()
biomodels_ref2_uri.setBiomodels('BIOMD0000000001')
self.assertEqual(sbml_model.getAnnotation().getModelIs()[0], biomodels_ref1_uri)
self.assertEqual(sbml_model.getAnnotation().getModelIs()[1], biomodels_ref2_uri)
self.assertEqual(sbml_model.getAnnotation().getModelIsDerivedFrom(), [])
publication_uri = URI()
publication_uri.setPubmed('8983160')
self.assertEqual(sbml_model.getAnnotation().getModelIsDescribedBy()[0], publication_uri)
self.assertEqual(
sbml_model.getAnnotation().getModelIsDescribedBy()[0].getName(),
("Edelstein SJ(1), Schaad O, Henry E, Bertrand D, Changeux JP., "
+ "A kinetic mechanism for nicotinic acetylcholine receptors based on multiple allosteric transitions., "
+ "1. Biol Cybern. 1996 Nov;75(5):361-79.")
)
self.assertEqual(sbml_model.getAnnotation().getModelIsInstanceOf(), [])
self.assertEqual(sbml_model.getAnnotation().getModelUnknown(), [])
descriptions = [
u'physical compartment', u'protein complex', u'protein complex', u'protein complex',
u'multimer of macromolecules', u'protein complex', u'multimer of macromolecules', u'protein complex',
u'multimer of macromolecules', u'protein complex', u'protein complex', u'multimer of macromolecules',
u'protein complex', u'non-covalent binding', u'non-covalent binding', u'conformational transition',
u'non-covalent binding', u'non-covalent binding', u'conformational transition',
u'conformational transition', u'non-covalent binding', u'non-covalent binding',
u'conformational transition', u'conformational transition', u'conformational transition',
u'non-covalent binding', u'non-covalent binding', u'physical compartment', u'protein complex',
u'protein complex', u'protein complex', u'multimer of macromolecules', u'protein complex',
u'multimer of macromolecules', u'protein complex', u'multimer of macromolecules', u'protein complex',
u'protein complex', u'multimer of macromolecules', u'protein complex', u'non-covalent binding',
u'non-covalent binding', u'conformational transition', u'non-covalent binding', u'non-covalent binding',
u'conformational transition', u'conformational transition', u'non-covalent binding',
u'non-covalent binding', u'conformational transition', u'conformational transition',
u'conformational transition', u'non-covalent binding', u'non-covalent binding'
]
resolved_descriptions = sbml_model.getListOfSBOTermsDescriptions()
self.assertEqual(resolved_descriptions, descriptions) | gpl-3.0 | -2,590,696,538,973,808,600 | 45.912698 | 109 | 0.751438 | false |
rajul/tvb-framework | tvb/interfaces/command/demos/datatypes/search_and_export.py | 1 | 4973 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Demo script on how to filter datatypes and later export them.
.. moduleauthor:: Lia Domide <[email protected]>
"""
if __name__ == "__main__":
from tvb.basic.profile import TvbProfile
TvbProfile.set_profile(TvbProfile.COMMAND_PROFILE)
from tvb.basic.filters.chain import FilterChain
from tvb.core.entities.file.files_helper import FilesHelper
from tvb.core.entities.storage import dao
from tvb.core.entities.transient.structure_entities import DataTypeMetaData
from tvb.datatypes.time_series import TimeSeriesRegion
from tvb.datatypes.connectivity import Connectivity
from sys import argv
import os
TVB_EXPORTER = "TVBExporter"
def _retrieve_entities_by_filters(kind, project_id, filters):
named_tuple_array, counter = dao.get_values_of_datatype(project_id, kind, filters)
print "Found " + str(counter) + " entities of type " + str(kind)
result = []
for named_tuple in named_tuple_array:
dt_id = named_tuple[0]
result.append(dao.get_generic_entity(kind, dt_id)[0])
return result
def search_and_export_ts(project_id, export_folder=os.path.join("~", "TVB")):
#### This is the simplest filter you could write: filter and entity by Subject
filter_connectivity = FilterChain(fields=[FilterChain.datatype + '.subject'],
operations=["=="],
values=[DataTypeMetaData.DEFAULT_SUBJECT])
connectivities = _retrieve_entities_by_filters(Connectivity, project_id, filter_connectivity)
#### A more complex filter: by linked entity (connectivity), BOLD monitor, sampling, operation param:
filter_timeseries = FilterChain(fields=[FilterChain.datatype + '._connectivity',
FilterChain.datatype + '._title',
FilterChain.datatype + '._sample_period',
FilterChain.datatype + '._sample_rate',
FilterChain.operation + '.parameters'
],
operations=["==", "like", ">=", "<=", "like"],
values=[connectivities[0].gid,
"Bold",
"500", "0.002",
'"conduction_speed": "3.0"'
]
)
    #### If you want to filter another type of TS, change the kind class below,
    #### instead of TimeSeriesRegion use TimeSeriesEEG, or TimeSeriesSurface, etc.
timeseries = _retrieve_entities_by_filters(TimeSeriesRegion, project_id, filter_timeseries)
for ts in timeseries:
print "============================="
print ts.summary_info
print " Original file: " + str(ts.get_storage_file_path())
destination_file = os.path.expanduser(os.path.join(export_folder, ts.get_storage_file_name()))
FilesHelper.copy_file(ts.get_storage_file_path(), destination_file)
if os.path.exists(destination_file):
print " TS file copied at: " + destination_file
else:
print " Some error happened when trying to copy at destination folder!!"
if __name__ == '__main__':
if len(argv) < 2:
PROJECT_ID = 1
else:
PROJECT_ID = int(argv[1])
print "We will try to search datatypes in project with ID:" + str(PROJECT_ID)
search_and_export_ts(PROJECT_ID) | gpl-2.0 | 4,476,070,721,329,385,000 | 40.45 | 105 | 0.626986 | false |
RedHatQE/rhui-testing-tools | rhui-tests/test_rhui_tcms178464.py | 1 | 2293 | #! /usr/bin/python -tt
import nose
from rhuilib.util import *
from rhuilib.rhui_testcase import *
from rhuilib.rhuimanager import *
from rhuilib.rhuimanager_cds import *
from rhuilib.rhuimanager_repo import *
from rhuilib.pulp_admin import PulpAdmin
from rhuilib.cds import RhuiCds
class test_tcms_178464(RHUITestcase):
def _setup(self):
'''[TCMS#178464 setup] Do initial rhui-manager run'''
RHUIManager.initial_run(self.rs.Instances["RHUA"][0])
'''[TCMS#178464 setup] Add cds '''
RHUIManagerCds.add_cds(self.rs.Instances["RHUA"][0], "Cluster1", self.rs.Instances["CDS"][0].private_hostname)
'''[TCMS#178464 setup] Create custom repo '''
RHUIManagerRepo.add_custom_repo(self.rs.Instances["RHUA"][0], "repo1")
'''[TCMS#178464 setup] Associate repos with clusters '''
RHUIManagerCds.associate_repo_cds(self.rs.Instances["RHUA"][0], "Cluster1", ["repo1"])
def _test(self):
'''[TCMS#178464 test] Check cds info screen '''
cds = RhuiCds(
hostname=self.rs.Instances["CDS"][0].private_hostname,
cluster="Cluster1",
repos=["repo1"]
)
nose.tools.assert_equal(RHUIManagerCds.info(self.rs.Instances["RHUA"][0], ["Cluster1"]), [cds])
'''[TCMS#178464 test] Check pulp-admin cds list '''
nose.tools.assert_equals(PulpAdmin.cds_list(self.rs.Instances["RHUA"][0]),
RHUIManagerCds.info(self.rs.Instances["RHUA"][0], ["Cluster1"]))
'''[TCMS#178464 test] Check certs created for custom repo '''
Expect.ping_pong(self.rs.Instances["RHUA"][0], "test -f /etc/pki/pulp/content/repo1/consumer-repo1.cert && echo SUCCESS", "[^ ]SUCCESS")
Expect.ping_pong(self.rs.Instances["RHUA"][0], "test -f /etc/pki/pulp/content/repo1/consumer-repo1.ca && echo SUCCESS", "[^ ]SUCCESS")
def _cleanup(self):
'''[TCMS#178464 cleanup] Remove cds '''
RHUIManagerCds.delete_cds(self.rs.Instances["RHUA"][0], "Cluster1", [self.rs.Instances["CDS"][0].private_hostname])
'''[TCMS#178464 cleanup] Delete custom repos '''
RHUIManagerRepo.delete_repo(self.rs.Instances["RHUA"][0], ["repo1"])
if __name__ == "__main__":
nose.run(defaultTest=__name__, argv=[__file__, '-v'])
| gpl-3.0 | -7,934,214,745,084,829,000 | 41.462963 | 144 | 0.632795 | false |
andreimaximov/algorithms | leetcode/algorithms/recover-binary-tree/solution.py | 1 | 2334 | #!/usr/bin/env python
class Solution(object):
def inorder(self, root):
"""
Returns the inorder traversal of nodes in the tree.
"""
current = root
stack = []
inorder = []
while current is not None or len(stack) > 0:
# Traverse to the left most (first) node in the tree rooted at the
# current node.
while current is not None:
stack.append(current)
current = current.left
# The stack is guaranteed to have at least one node due to the
# condition of the outer while loop.
current = stack.pop()
inorder.append(current)
# Current might be None after this, but then the algorithm will
# just continue traversing up.
current = current.right
return inorder
def findOutOfOrder(self, root):
"""
        Finds the two nodes whose values are swapped, based on the
        out-of-order pairs in the tree's inorder traversal.
"""
inorder = self.inorder(root)
# Impossible for any values to be out of order with 0 or 1 nodes.
if len(inorder) < 2:
return ()
outOfOrder = [] # Stores indices where inorder[i] >= inorder[i + 1]
for i in range(0, len(inorder) - 1):
if inorder[i].val >= inorder[i + 1].val:
outOfOrder.append(i)
if len(outOfOrder) == 2:
break
n = len(outOfOrder)
if n == 0:
# No out of order nodes.
return ()
elif n == 1:
# Out of order nodes are next to each other.
i = outOfOrder[0]
return (inorder[i], inorder[i + 1])
elif n == 2:
# Out of order nodes are not next to each other.
i = outOfOrder[0]
j = outOfOrder[1]
return (inorder[i], inorder[j + 1])
def recoverTree(self, root):
nodes = self.findOutOfOrder(root)
assert len(nodes) == 2
# Swap the values in the two out of order nodes.
firstValue = nodes[0].val
nodes[0].val = nodes[1].val
nodes[1].val = firstValue
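# Hedged local-run sketch (not part of the original solution): LeetCode
# supplies TreeNode at judge time, so a minimal stand-in is defined here to
# make the algorithm runnable and testable outside the judge.
class TreeNode(object):
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
def demo():
    # Tree [3, 1, 4, None, None, 2]: the values 2 and 3 were swapped.
    root = TreeNode(3)
    root.left = TreeNode(1)
    root.right = TreeNode(4)
    root.right.left = TreeNode(2)
    Solution().recoverTree(root)
    # After recovery the inorder value sequence is sorted again.
    assert [n.val for n in Solution().inorder(root)] == [1, 2, 3, 4]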
def main():
print('Please run this solution on LeetCode.')
print('https://leetcode.com/problems/recover-binary-search-tree/')
if __name__ == '__main__':
main()
| mit | 140,343,525,249,919,440 | 29.311688 | 78 | 0.532562 | false |
Rabuske/HeartRatePython | HeartRatePython/HeartRatePython/Token.py | 1 | 1293 | import datetime
class Token(object):
accessToken = None
refreshToken = None
generationDate = None
expiresIn = None # in seconds
type = None
userId = None
def __init__(self, userId, accessToken, refreshToken, type, expiresIn, generationDate=None):
self.userId = userId
self.accessToken = accessToken
self.refreshToken = refreshToken
self.type = type
self.expiresIn = expiresIn
self.generationDate = generationDate
if self.generationDate == None:
self.generationDate = datetime.datetime.utcnow().timestamp()
def isExpired(self):
expirationDate = float(self.generationDate) + float(self.expiresIn)
if datetime.datetime.utcnow().timestamp() > expirationDate:
return True
else:
return False
# Create an object of the type Token based on a response from the Requests package
def createFromDDIC(dictionary):
try:
return Token(dictionary[0], dictionary[1], dictionary[2], dictionary[3], dictionary[4], dictionary[5])
except KeyError as e:
return Token(dictionary["user_id"], dictionary["access_token"], dictionary["refresh_token"], dictionary["token_type"], dictionary["expires_in"])
| gpl-3.0 | 5,066,700,721,045,928,000 | 37.058824 | 164 | 0.649652 | false |
mpkasp/django-bom | bom/migrations/0031_auto_20200104_1352.py | 1 | 2301 | # Generated by Django 2.2.8 on 2020-01-04 13:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bom', '0030_auto_20200101_2253'),
]
operations = [
migrations.AlterField(
model_name='partrevision',
name='height_units',
field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True),
),
migrations.AlterField(
model_name='partrevision',
name='length_units',
field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True),
),
migrations.AlterField(
model_name='partrevision',
name='value_units',
field=models.CharField(blank=True, choices=[('', '-----'), ('Ohms', 'Ω'), ('mOhms', 'mΩ'), ('kOhms', 'kΩ'), ('F', 'F'), ('pF', 'pF'), ('nF', 'nF'), ('uF', 'μF'), ('V', 'V'), ('uV', 'μV'), ('mV', 'mV'), ('A', 'A'), ('uA', 'μA'), ('mA', 'mA'), ('C', '°C'), ('F', '°F'), ('H', 'H'), ('mH', 'mH'), ('uH', 'μH'), ('Hz', 'Hz'), ('kHz', 'kHz'), ('MHz', 'MHz'), ('GHz', 'GHz'), ('Other', 'Other')], default=None, max_length=5, null=True),
),
migrations.AlterField(
model_name='partrevision',
name='wavelength_units',
field=models.CharField(blank=True, choices=[('', '-----'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('A', 'Å'), ('Other', 'Other')], default=None, max_length=5, null=True),
),
migrations.AlterField(
model_name='partrevision',
name='width_units',
field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True),
),
]
| gpl-3.0 | -3,665,462,270,932,256,000 | 59.157895 | 442 | 0.453193 | false |
fjruizruano/ngs-protocols | mitobim_run.py | 1 | 3707 | #!/usr/bin/python
import sys
import os
from subprocess import call
from Bio import SeqIO
print "Usage: mitobim_run.py NumberOfReads ListOfFiles Reference [miramito/quickmito/seedmito] missmatch"
try:
nreads = sys.argv[1]
except:
nreads = raw_input("Introduce number of reads: ")
try:
lista = sys.argv[2]
except:
lista = raw_input("Introduce list of files: ")
try:
ref = sys.argv[3]
except:
ref = raw_input("Introduce Fasta file as reference: ")
try:
prot = sys.argv[4]
except:
prot = raw_input("Introduce protocol name (miramito/quickmito/seedmito): ")
try:
mism = sys.argv[5]
except:
mism = "15"
manifest = """echo "\n#manifest file for basic mapping assembly with illumina data using MIRA 4\n\nproject = initial-mapping-testpool-to-Salpinus-mt\n\njob=genome,mapping,accurate\n\nparameters = -NW:mrnl=0 -AS:nop=1 SOLEXA_SETTINGS -CO:msr=no\n\nreadgroup\nis_reference\ndata = reference.fa\nstrain = Salpinus-mt-genome\n\nreadgroup = reads\ndata = reads.fastq\ntechnology = solexa\nstrain = testpool\n" > manifest.conf"""
miramito = """mira manifest.conf && MITObim_1.8.pl --missmatch %s --clean -start 1 -end 1000 -sample testpool -ref Salpinus_mt_genome -readpool reads.fastq -maf initial-mapping-testpool-to-Salpinus-mt_assembly/initial-mapping-testpool-to-Salpinus-mt_d_results/initial-mapping-testpool-to-Salpinus-mt_out.maf > log""" % mism
quickmito = """MITObim_1.8.pl -start 1 -end 1000 -sample testpool -ref Salpinus_mt_genome -readpool reads.fastq --missmatch %s --quick reference.fa --clean > log""" % mism
seedmito = """MITObim_1.8.pl -sample testpool -ref Salpinus_mt_genome -readpool reads.fastq --quick reference.fa --missmatch %s -end 1000 --clean > log""" % mism
miramitoout = """/testpool-Salpinus_mt_genome_assembly/testpool-Salpinus_mt_genome_d_results/testpool-Salpinus_mt_genome_out_testpool.unpadded.fasta"""
pairs = open(lista).readlines()
npairs = len(pairs)/2
for npair in range(0,npairs):
pairone = pairs[npair*2][:-1]
pairtwo = pairs[(npair*2)+1][:-1]
name = ""
paironesplit = pairone.split(".")
if paironesplit[-1] == "gz":
name = ".".join(paironesplit[0:-2])
elif paironesplit[-1] == "fastq" or paironesplit[-1] == "fq":
name = ".".join(paironesplit[0:-1])
name = name[:-2]
foldername = "%s_%s" % (name,prot)
call("mkdir %s" % foldername , shell=True)
os.chdir(foldername)
print "\nStarting with " + name
call("seqtk sample -s100 ../%s %s > %s" % (pairone,nreads,name+".fq.subset1"), shell=True)
call("seqtk sample -s100 ../%s %s > %s" % (pairtwo,nreads,name+".fq.subset2"), shell=True)
call("shuffleSequences_fastq.pl %s %s %s" % (name+".fq.subset1",name+".fq.subset2",name+".shuffled.fastq"), shell=True)
call("ln -sf %s reads.fastq" % (name+".shuffled.fastq"), shell=True)
call("ln -sf ../%s reference.fa" % ref, shell=True)
if prot == "miramito":
call(manifest, shell=True)
call(miramito, shell=True)
elif prot == "quickmito":
call(quickmito, shell=True)
elif prot == "seedmito":
call(seedmito, shell=True)
else:
break
list_dir = os.listdir(".")
list_dir.sort()
iterations = []
for dir in list_dir:
if dir.startswith("iteration"):
iterations.append(dir)
os.chdir("../")
consensus = "%s/%s" % (foldername,iterations[-1]+miramitoout)
secus = SeqIO.parse(open(consensus), "fasta")
out = open("%s_%s.fa" % (name,prot), "w")
i = 0
for secu in secus:
i+=1
s = str(secu.seq)
s = s.replace("x","n")
out.write(">%s_%s_%s\n%s\n" % (name,prot,i, s))
out.close()
print name + " finalized!!!"
| gpl-3.0 | 8,431,332,318,362,962,000 | 37.216495 | 423 | 0.647694 | false |
eboreapps/Scikit-Learn-Playground | ScikitlearnPlayground/CrossValidationKNN.py | 1 | 1171 | #Copyright 2016 EBORE APPS (http://www.eboreapps.com)
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from sklearn.cross_validation import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
iris = load_iris()
X = iris.data
y = iris.target
k_range = range(1, 50)
k_scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors=k)
scores = cross_val_score(knn, X, y, scoring='accuracy', cv=10)
k_scores.append(scores.mean())
# visualize
plt.plot(k_range, k_scores)
plt.xlabel("Value of K for KNN")
plt.ylabel("Cross-Validated accuracy")
plt.show()
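# Illustrative addition (not part of the original script): read off the best k
# programmatically; ties are broken in favor of the smaller k.
best_k = k_range[k_scores.index(max(k_scores))]
print("best k by 10-fold CV accuracy: %d" % best_k)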
# best model is with n_neighbors = 20 | apache-2.0 | -6,109,181,488,564,701,000 | 27.585366 | 73 | 0.760034 | false |
skosukhin/spack | var/spack/repos/builtin/packages/everytrace/package.py | 1 | 2163 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Everytrace(CMakePackage):
"""Get stack trace EVERY time a program exits."""
homepage = "https://github.com/citibeth/everytrace"
url = "https://github.com/citibeth/everytrace/tarball/0.2.0"
version('0.2.0', '2af0e5b6255064d5191accebaa70d222')
version('develop',
git='https://github.com/citibeth/everytrace.git', branch='develop')
variant('mpi', default=True, description='Enables MPI parallelism')
variant('fortran', default=True,
description='Enable use with Fortran programs')
depends_on('mpi', when='+mpi')
def cmake_args(self):
spec = self.spec
return [
'-DUSE_MPI=%s' % ('YES' if '+mpi' in spec else 'NO'),
'-DUSE_FORTRAN=%s' % ('YES' if '+fortran' in spec else 'NO')]
def setup_environment(self, spack_env, run_env):
run_env.prepend_path('PATH', join_path(self.prefix, 'bin'))
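# Typical usage once this package file is in a Spack repo (illustrative):
#   spack install everytrace +mpi +fortran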
| lgpl-2.1 | 4,433,843,952,145,421,000 | 41.411765 | 79 | 0.652797 | false |
JimBrown/MarlinTarantula | buildroot/share/PlatformIO/scripts/generic_create_variant.py | 1 | 1714 | #
# generic_create_variant.py
#
# Copy one of the variants from buildroot/platformio/variants into
# the appropriate framework variants folder, so that its contents
# will be picked up by PlatformIO just like any other variant.
#
import os, shutil, marlin
from SCons.Script import DefaultEnvironment
env = DefaultEnvironment()
#
# Get the platform name from the 'platform_packages' option,
# or look it up by the platform.class.name.
#
platform = env.PioPlatform()
from platformio.package.meta import PackageSpec
platform_packages = env.GetProjectOption('platform_packages')
if len(platform_packages) == 0:
	frameworks = {
		"Ststm32Platform": "framework-arduinoststm32",
		"AtmelavrPlatform": "framework-arduino-avr"
	}
	platform_name = frameworks[platform.__class__.__name__]
else:
	platform_name = PackageSpec(platform_packages[0]).name
if platform_name in [ "usb-host-msc", "usb-host-msc-cdc-msc", "usb-host-msc-cdc-msc-2", "tool-stm32duino" ]:
platform_name = "framework-arduinoststm32"
FRAMEWORK_DIR = platform.get_package_dir(platform_name)
assert os.path.isdir(FRAMEWORK_DIR)
board = env.BoardConfig()
#mcu_type = board.get("build.mcu")[:-2]
variant = board.get("build.variant")
#series = mcu_type[:7].upper() + "xx"
# Prepare a new empty folder at the destination
variant_dir = os.path.join(FRAMEWORK_DIR, "variants", variant)
if os.path.isdir(variant_dir):
shutil.rmtree(variant_dir)
if not os.path.isdir(variant_dir):
os.mkdir(variant_dir)
# Source dir is a local variant sub-folder
source_dir = os.path.join("buildroot/share/PlatformIO/variants", variant)
assert os.path.isdir(source_dir)
marlin.copytree(source_dir, variant_dir)
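# This script is meant to run as a PlatformIO extra_script, typically wired up
# in platformio.ini (illustrative):
#   [env:my_board]
#   extra_scripts = buildroot/share/PlatformIO/scripts/generic_create_variant.py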
| gpl-3.0 | 8,632,913,870,096,174,000 | 30.740741 | 108 | 0.738623 | false |
platsch/OctoPNP | octoprint_OctoPNP/ImageProcessing.py | 1 | 11846 | # -*- coding: utf-8 -*-
"""
This file is part of OctoPNP
OctoPNP is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OctoPNP is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OctoPNP. If not, see <http://www.gnu.org/licenses/>.
Main author: Florens Wasserfall <[email protected]>
"""
import cv2
import numpy as np
import math
import os
import shutil
class ImageProcessing:
def __init__(self, box_size, bed_cam_binary_thresh, head_cam_binary_thresh):
self.box_size=box_size
self.bed_binary_thresh = bed_cam_binary_thresh
self.head_binary_thresh = head_cam_binary_thresh
#self.lower_mask_color = np.array([22,28,26]) # green default
#self.upper_mask_color = np.array([103,255,255])
self.lower_mask_color = np.array([0,85,76])
self.upper_mask_color = np.array([100,255,255])
self._img_path = ""
self._last_saved_image_path = None
self._last_error = ""
self._interactive=False
self._debug = True
# Locates a part in a box. Box size must be given to constructor. Image must contain only
# one box with white background.
# Returns displacement with respect to the center of the box if a part is detected, False otherwise.
    # The boolean relative_to_camera sets whether the offset should be relative to the box or to the camera.
#===================================================================================================
def locatePartInBox(self,img_path, relative_to_camera):
result = False
self._img_path = img_path
# open image file
img=cv2.imread(img_path,cv2.IMREAD_COLOR)
#detect box boundaries
rotated_crop_rect = self._rotatedBoundingBox(img, self.head_binary_thresh, 0.6, 0.95)
if(rotated_crop_rect):
rotated_box = cv2.boxPoints(rotated_crop_rect)
left_x = int(min(rotated_box[0][0],rotated_box[1][0]))
right_x = int(max(rotated_box[2][0],rotated_box[3][0]))
upper_y = int(min(rotated_box[1][1],rotated_box[2][1]))
lower_y = int(max(rotated_box[0][1],rotated_box[3][1]))
            # workaround for bounding boxes that are bigger than the image
if(left_x < 0): left_x = 0
if(upper_y < 0): upper_y = 0
if(right_x < 0): right_x = img.shape[1]
if(lower_y < 0): lower_y = img.shape[0]
#Crop image
img_crop=img[upper_y:lower_y, left_x:right_x]
# now find part inside the box
cm_rect = self._rotatedBoundingBox(img_crop, self.head_binary_thresh, 0.001, 0.7)
if(cm_rect):
cm_x = cm_rect[0][0]
cm_y = cm_rect[0][1]
res_x = img_crop.shape[1]
res_y = img_crop.shape[0]
displacement_x=(cm_x-res_x/2)*self.box_size/res_x
displacement_y=((res_y-cm_y)-res_y/2)*self.box_size/res_y
if relative_to_camera:
#incorporate the position of the tray box in relation to the image
displacement_x += (left_x - (img.shape[1]-right_x))/2 * self.box_size/res_x
displacement_y -= (upper_y - (img.shape[0]-(lower_y)))/2 * self.box_size/res_y
result = displacement_x,displacement_y
# Generate result image and return
cv2.circle(img_crop,(int(cm_x),int(cm_y)), 5, (0,255,0), -1)
filename="/finalcm_"+os.path.basename(self._img_path)
finalcm_path=os.path.dirname(self._img_path)+filename
cv2.imwrite(finalcm_path,img_crop)
self._last_saved_image_path = finalcm_path
if self._interactive: cv2.imshow("Part in box: ",img_crop)
if self._interactive: cv2.waitKey(0)
else:
self._last_error = "Unable to find part in box"
else:
self._last_error = "Unable to locate box"
return result
# Get part orientation by computing a rotated bounding box around contours
# and determining the main orientation of this box
    # Returns the angle of the main edges relative to the
# next main axis [-45°:45°]
def getPartOrientation(self,img_path, pxPerMM, offset=0):
self._img_path = img_path
result = False
# open image file
img=cv2.imread(img_path,cv2.IMREAD_COLOR)
mask = self._maskBackground(img)
        # we should use actual object size here
        min_area_factor = pxPerMM**2 / (img.shape[1] * img.shape[0]) # 1mm² (computed but not passed to the call below)
rect = self._rotatedBoundingBox(img, 50, 0.005, 0.7, mask)
if(rect):
# draw rotated bounding box for visualization
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img,[box],0,(0,0,255),2)
# compute rotation offset
rotation = rect[2] - offset
# normalize to positive PI range
if rotation < 0:
rotation = (rotation % -180) + 180
rotation = rotation % 90
result = -rotation if rotation < 45 else 90-rotation
if self._debug: print("Part deviation measured by bed camera: " + str(result))
else:
self._last_error = "Unable to locate part for finding the orientation"
if self._debug: print(self._last_error)
result = False
if self._interactive: cv2.imshow("contours",img)
if self._interactive: cv2.waitKey(0)
#save result as image for GUI
filename="/orientation_"+os.path.basename(self._img_path)
orientation_img_path=os.path.dirname(self._img_path)+filename
cv2.imwrite(orientation_img_path, img)
self._last_saved_image_path = orientation_img_path
return result
# Find the position of a (already rotated) part. Returns the offset between the
# center of the image and the parts center of mass, 0,0 if no part is detected.
#==============================================================================
def getPartPosition(self, img_path, pxPerMM):
self._img_path = img_path
result = False
# open image file
img=cv2.imread(img_path,cv2.IMREAD_COLOR)
mask = self._maskBackground(img)
res_x = img.shape[1]
res_y = img.shape[0]
# we should use actual object size here
min_area_factor = pxPerMM**2 / (res_x * res_y) # 1mm²
rect = self._rotatedBoundingBox(img, 50, min_area_factor, 0.7, mask)
        if(rect):
            cm_x = rect[0][0]
            cm_y = rect[0][1]
            displacement_x=(cm_x-res_x/2)/pxPerMM
            displacement_y=((res_y-cm_y)-res_y/2)/pxPerMM
            result = [displacement_x, -displacement_y]
            # mark the detected center of mass; this must stay inside the success
            # branch, otherwise cm_x/cm_y are undefined when no part was found
            cv2.circle(img,(int(cm_x),int(cm_y)),5,(0,255,0),-1)
        else:
            self._last_error = "Unable to locate part for correcting the position"
            if self._debug: print(self._last_error)
            result = False
        # write image for UI
        filename="/final_"+os.path.basename(self._img_path)
        final_img_path=os.path.dirname(self._img_path)+filename
        cv2.imwrite(final_img_path,img)
        self._last_saved_image_path = final_img_path
if self._interactive: cv2.imshow("Center of Mass",img)
if self._interactive: cv2.waitKey(0)
return result
#==============================================================================
def getLastSavedImagePath(self):
if self._last_saved_image_path:
return self._last_saved_image_path
else:
return False
#==============================================================================
def getLastErrorMessage(self):
return self._last_error
#==============================================================================
def _rotatedBoundingBox(self, img, binary_thresh, min_area_factor, max_area_factor, binary_img = ()):
result = False
if (len(binary_img) == 0):
#convert image to grey and blur
gray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray_img=cv2.blur(gray_img, (3,3))
ret, binary_img = cv2.threshold(gray_img, binary_thresh, 255, cv2.THRESH_BINARY)
# depending on the OpenCV Version findContours returns 2 or 3 objects...
#contours, hierarchy = cv2.findContours(binary_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE, (0, 0));
contours = cv2.findContours(binary_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE, offset=(0, 0))[0]
#cv2.drawContours(img, contours, -1, (0,255,0), 3) # draw basic contours
        minArea = binary_img.shape[0] * binary_img.shape[1] * min_area_factor # how to find a better value??? input from part description?
        maxArea = binary_img.shape[0] * binary_img.shape[1] * max_area_factor # Y*X | don't detect full image
        rectPoints = []
for contour in contours:
rect = cv2.minAreaRect(contour)
rectArea = rect[1][0] * rect[1][1]
if(rectArea > minArea and rectArea < maxArea):
box = cv2.boxPoints(rect)
for point in box:
rectPoints.append(np.array(point, dtype=np.int32))
if self._interactive: box = np.int0(box)
if self._interactive: cv2.drawContours(img,[box],0,(0,0,255),2)
#cv2.imshow("contours",binary_img)
#cv2.waitKey(0)
if self._interactive: cv2.imshow("Binarized image",binary_img)
if self._interactive: cv2.waitKey(0)
if self._interactive: cv2.imshow("contours",img)
if self._interactive: cv2.waitKey(0)
if (len(rectPoints) >= 4):
rectArray = np.array(rectPoints)
rect = cv2.minAreaRect(rectArray)
# draw rotated bounding box for visualization
box = cv2.boxPoints(rect)
box = np.int0(box)
cv2.drawContours(img,[box],0,(0,0,255),2)
result = rect
else:
self._last_error = "Unable to find contour in image"
return result
# Compute a binary image / mask by removing all pixels in the given color range
# mask_corners: remove all pixels outside a circle touching the image boundaries
# to crop badly illuminated corners
#==============================================================================
def _maskBackground(self, img, mask_corners = True):
h,w,c = np.shape(img)
blur_img=cv2.blur(img, (5,5))
hsv = cv2.cvtColor(blur_img, cv2.COLOR_BGR2HSV)
# create binary mask by finding background color range
mask = cv2.inRange(hsv, self.lower_mask_color, self.upper_mask_color)
# remove the corners from mask since they are prone to illumination problems
if(mask_corners):
circle_mask = np.zeros((h, w), np.uint8)
circle_mask[:, :] = 255
cv2.circle(circle_mask,(int(w/2), int(h/2)), min(int(w/2), int(h/2)), 0, -1)
mask = cv2.bitwise_or(mask,circle_mask)
# invert mask to get white objects on black background
#inverse_mask = 255 - mask
if self._interactive: cv2.imshow("binary mask", mask)
if self._interactive: cv2.waitKey(0)
return mask
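# Minimal usage sketch (hypothetical file paths and calibration values; box size
# is in mm, binary thresholds in 0-255):
#   ip = ImageProcessing(box_size=10.0, bed_cam_binary_thresh=50, head_cam_binary_thresh=150)
#   offset = ip.locatePartInBox("/tmp/head_cam.jpg", relative_to_camera=True)
#   if offset:
#       dx, dy = offset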
| agpl-3.0 | -1,268,269,883,371,438,300 | 39.006757 | 139 | 0.574565 | false |
neilpanchal/iPython-Notebook-Profile | profile_neil/ipython_config.py | 1 | 20604 | # Configuration file for ipython.
c = get_config()
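# Settings below are applied by uncommenting them and assigning through the
# config object, e.g. (illustrative):
# c.TerminalInteractiveShell.editor = 'vim'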
#------------------------------------------------------------------------------
# InteractiveShellApp configuration
#------------------------------------------------------------------------------
# A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.InteractiveShellApp.pylab_import_all = True
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.InteractiveShellApp.exec_PYTHONSTARTUP = True
# dotted module name of an IPython extension to load.
# c.InteractiveShellApp.extra_extension = ''
# Execute the given command string.
# c.InteractiveShellApp.code_to_run = ''
# A file to be run
# c.InteractiveShellApp.file_to_run = ''
# A list of dotted module names of IPython extensions to load.
# c.InteractiveShellApp.extensions = []
# lines of code to run at IPython startup.
# c.InteractiveShellApp.exec_lines = []
# List of files to run at IPython startup.
# c.InteractiveShellApp.exec_files = []
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.InteractiveShellApp.hide_initial_ns = True
# Reraise exceptions encountered loading IPython extensions?
# c.InteractiveShellApp.reraise_ipython_extension_failures = False
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.InteractiveShellApp.pylab = None
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.InteractiveShellApp.gui = None
# Run the module as a script.
# c.InteractiveShellApp.module_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.InteractiveShellApp.matplotlib = None
#------------------------------------------------------------------------------
# TerminalIPythonApp configuration
#------------------------------------------------------------------------------
# TerminalIPythonApp will inherit config from: BaseIPythonApplication,
# Application, InteractiveShellApp
# Whether to overwrite existing config files when copying
# c.TerminalIPythonApp.overwrite = False
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.TerminalIPythonApp.force_interact = False
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.TerminalIPythonApp.gui = None
# lines of code to run at IPython startup.
# c.TerminalIPythonApp.exec_lines = []
# List of files to run at IPython startup.
# c.TerminalIPythonApp.exec_files = []
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.TerminalIPythonApp.ipython_dir = ''
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.TerminalIPythonApp.hide_initial_ns = True
# Reraise exceptions encountered loading IPython extensions?
# c.TerminalIPythonApp.reraise_ipython_extension_failures = False
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.TerminalIPythonApp.pylab = None
# The Logging format template
# c.TerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.TerminalIPythonApp.extra_config_file = ''
# The date format used by logging formatters for %(asctime)s
# c.TerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.TerminalIPythonApp.exec_PYTHONSTARTUP = True
# Set the log level by value or name.
# c.TerminalIPythonApp.log_level = 30
# Whether to display a banner upon starting IPython.
# c.TerminalIPythonApp.display_banner = True
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.TerminalIPythonApp.pylab_import_all = True
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.TerminalIPythonApp.matplotlib = None
# dotted module name of an IPython extension to load.
# c.TerminalIPythonApp.extra_extension = ''
# Execute the given command string.
# c.TerminalIPythonApp.code_to_run = ''
# A file to be run
# c.TerminalIPythonApp.file_to_run = ''
# A list of dotted module names of IPython extensions to load.
# c.TerminalIPythonApp.extensions = []
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.TerminalIPythonApp.verbose_crash = False
# Suppress warning messages about legacy config files
# c.TerminalIPythonApp.ignore_old_config = False
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.TerminalIPythonApp.copy_config_files = False
# Run the module as a script.
# c.TerminalIPythonApp.module_to_run = ''
# Start IPython quickly by skipping the loading of config files.
# c.TerminalIPythonApp.quick = False
# The IPython profile to use.
# c.TerminalIPythonApp.profile = 'default'
#------------------------------------------------------------------------------
# TerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# TerminalInteractiveShell will inherit config from: InteractiveShell
# The part of the banner to be printed after the profile
# c.TerminalInteractiveShell.banner2 = ''
# auto editing of files with syntax errors.
# c.TerminalInteractiveShell.autoedit_syntax = False
# Show rewritten input, e.g. for autocall.
# c.TerminalInteractiveShell.show_rewritten_input = True
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.TerminalInteractiveShell.deep_reload = False
#
# c.TerminalInteractiveShell.object_info_string_level = 0
# Automatically call the pdb debugger after every exception.
# c.TerminalInteractiveShell.pdb = False
#
# c.TerminalInteractiveShell.quiet = False
#
# c.TerminalInteractiveShell.debug = False
# Deprecated, use PromptManager.in2_template
# c.TerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# The shell program to be used for paging.
# c.TerminalInteractiveShell.pager = 'less'
# Deprecated, use PromptManager.out_template
# c.TerminalInteractiveShell.prompt_out = 'Out[\\#]: '
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.TerminalInteractiveShell.screen_length = 0
# Deprecated, use PromptManager.in_template
# c.TerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.TerminalInteractiveShell.ipython_dir = ''
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.TerminalInteractiveShell.color_info = True
# The name of the logfile to use.
# c.TerminalInteractiveShell.logfile = ''
#
# c.TerminalInteractiveShell.separate_out = ''
#
# c.TerminalInteractiveShell.history_length = 10000
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.TerminalInteractiveShell.cache_size = 1000
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.TerminalInteractiveShell.display_page = False
#
# c.TerminalInteractiveShell.readline_remove_delims = '-/~'
#
# c.TerminalInteractiveShell.xmode = 'Context'
# Enable magic commands to be called without the leading %.
# c.TerminalInteractiveShell.automagic = True
#
# c.TerminalInteractiveShell.wildcards_case_sensitive = True
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.TerminalInteractiveShell.logappend = ''
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.TerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.TerminalInteractiveShell.logstart = False
#
# c.TerminalInteractiveShell.separate_out2 = ''
# The part of the banner to be printed before the profile
# c.TerminalInteractiveShell.banner1 = 'Python 3.4.3 |Anaconda 2.2.0 (x86_64)| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.TerminalInteractiveShell.ast_transformers = []
# Save multi-line entries as one entry in readline history
# c.TerminalInteractiveShell.multiline_history = True
# Autoindent IPython code entered interactively.
# c.TerminalInteractiveShell.autoindent = True
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.TerminalInteractiveShell.confirm_exit = True
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.TerminalInteractiveShell.editor = 'vi'
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.TerminalInteractiveShell.autocall = 0
#
# c.TerminalInteractiveShell.readline_use = True
# Set the color scheme (NoColor, Linux, or LightBG).
# c.TerminalInteractiveShell.colors = 'LightBG'
# Don't call post-execute functions that have failed in the past.
# c.TerminalInteractiveShell.disable_failing_post_execute = False
#
# c.TerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
#
# c.TerminalInteractiveShell.separate_in = '\n'
# Deprecated, use PromptManager.justify
# c.TerminalInteractiveShell.prompts_pad_left = True
# Enable auto setting the terminal title.
# c.TerminalInteractiveShell.term_title = False
#------------------------------------------------------------------------------
# PromptManager configuration
#------------------------------------------------------------------------------
# This is the primary interface for producing IPython's prompts.
# Output prompt. '\#' will be transformed to the prompt number
# c.PromptManager.out_template = 'Out[\\#]: '
# If True (default), each prompt will be right-aligned with the preceding one.
# c.PromptManager.justify = True
# Input prompt. '\#' will be transformed to the prompt number
# c.PromptManager.in_template = 'In [\\#]: '
# Continuation prompt.
# c.PromptManager.in2_template = ' .\\D.: '
#
# c.PromptManager.color_scheme = 'Linux'
#------------------------------------------------------------------------------
# HistoryManager configuration
#------------------------------------------------------------------------------
# A class to organize all history-related functionality in one place.
# HistoryManager will inherit config from: HistoryAccessor
# Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
# c.HistoryManager.connection_options = {}
# Should the history database include output? (default: no)
# c.HistoryManager.db_log_output = False
# Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
# c.HistoryManager.hist_file = ''
# enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
# c.HistoryManager.enabled = True
# Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
# c.HistoryManager.db_cache_size = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# PlainTextFormatter configuration
#------------------------------------------------------------------------------
# The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
# PlainTextFormatter will inherit config from: BaseFormatter
# Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
# c.PlainTextFormatter.max_seq_length = 1000
#
# c.PlainTextFormatter.float_precision = ''
#
# c.PlainTextFormatter.type_printers = {}
#
# c.PlainTextFormatter.singleton_printers = {}
#
# c.PlainTextFormatter.verbose = False
#
# c.PlainTextFormatter.max_width = 79
#
# c.PlainTextFormatter.pprint = True
#
# c.PlainTextFormatter.deferred_printers = {}
#
# c.PlainTextFormatter.newline = '\n'
#------------------------------------------------------------------------------
# IPCompleter configuration
#------------------------------------------------------------------------------
# Extension of the completer class with IPython-specific features
# IPCompleter will inherit config from: Completer
# Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
# c.IPCompleter.merge_completions = True
# Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
# c.IPCompleter.omit__names = 2
# Activate greedy completion
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
# c.IPCompleter.greedy = False
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
# c.IPCompleter.limit_to__all__ = False
#------------------------------------------------------------------------------
# ScriptMagics configuration
#------------------------------------------------------------------------------
# Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
# Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
# c.ScriptMagics.script_magics = []
# Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
# c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# StoreMagics configuration
#------------------------------------------------------------------------------
# Lightweight persistence for python variables.
#
# Provides the %store magic.
# If True, any %store-d variables will be automatically restored when IPython
# starts.
# c.StoreMagics.autorestore = False
| mit | -7,848,528,247,370,249,000 | 36.530055 | 565 | 0.681518 | false |
pgmillon/ansible | lib/ansible/modules/database/postgresql/postgresql_ping.py | 1 | 3674 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_ping
short_description: Check remote PostgreSQL server availability
description:
- Simple module to check remote PostgreSQL server availability.
version_added: '2.8'
options:
db:
description:
- Name of a database to connect to.
type: str
aliases:
- login_db
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
# PostgreSQL ping dbsrv server from the shell:
# ansible dbsrv -m postgresql_ping
# In the example below you need to generate certificates previously.
# See https://www.postgresql.org/docs/current/libpq-ssl.html for more information.
- name: PostgreSQL ping dbsrv server using not default credentials and ssl
postgresql_ping:
db: protected_db
login_host: dbsrv
login_user: secret
login_password: secret_pass
ca_cert: /root/root.crt
ssl_mode: verify-full
'''
RETURN = r'''
is_available:
description: PostgreSQL server availability.
returned: always
type: bool
sample: true
server_version:
description: PostgreSQL server version.
returned: always
type: dict
sample: { major: 10, minor: 1 }
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
# ===========================================
# PostgreSQL module specific support methods.
#
class PgPing(object):
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.is_available = False
self.version = {}
def do(self):
self.get_pg_version()
return (self.is_available, self.version)
def get_pg_version(self):
query = "SELECT version()"
raw = exec_sql(self, query, add_to_executed=False)[0][0]
if raw:
self.is_available = True
raw = raw.split()[1].split('.')
self.version = dict(
major=int(raw[0]),
minor=int(raw[1]),
)
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', aliases=['login_db']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
# Set some default values:
cursor = False
db_connection = False
result = dict(
changed=False,
is_available=False,
server_version=dict(),
)
conn_params = get_conn_params(module, module.params)
db_connection = connect_to_db(module, conn_params, fail_on_conn=False)
if db_connection is not None:
cursor = db_connection.cursor(cursor_factory=DictCursor)
# Do job:
pg_ping = PgPing(module, cursor)
if cursor:
# If connection established:
result["is_available"], result["server_version"] = pg_ping.do()
db_connection.rollback()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,587,041,869,324,349,000 | 23.657718 | 92 | 0.633642 | false |
eharney/cinder | cinder/tests/unit/api/v3/test_attachments.py | 1 | 12523 | # Copyright (C) 2017 HuaWei Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for attachments Api.
"""
import ddt
import mock
import webob
from cinder.api import microversions as mv
from cinder.api.v3 import attachments as v3_attachments
from cinder import context
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
from cinder.volume import api as volume_api
from cinder.volume import rpcapi as volume_rpcapi
@ddt.ddt
class AttachmentsAPITestCase(test.TestCase):
"""Test Case for attachment API."""
def setUp(self):
super(AttachmentsAPITestCase, self).setUp()
self.controller = v3_attachments.AttachmentsController()
self.volume_api = volume_api.API()
self.ctxt = context.RequestContext(fake.USER_ID, fake.PROJECT_ID,
auth_token=True,
is_admin=True)
self.volume1 = self._create_volume(display_name='fake_volume_1',
project_id=fake.PROJECT_ID)
self.volume2 = self._create_volume(display_name='fake_volume_2',
project_id=fake.PROJECT2_ID)
self.attachment1 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID1)
self.attachment2 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID1)
self.attachment3 = self._create_attachment(
volume_uuid=self.volume1.id, instance_uuid=fake.UUID2)
self.attachment4 = self._create_attachment(
volume_uuid=self.volume2.id, instance_uuid=fake.UUID2)
self.addCleanup(self._cleanup)
def _cleanup(self):
self.attachment1.destroy()
self.attachment2.destroy()
self.attachment3.destroy()
self.attachment4.destroy()
self.volume1.destroy()
self.volume2.destroy()
def _create_volume(self, ctxt=None, display_name=None, project_id=None):
"""Create a volume object."""
ctxt = ctxt or self.ctxt
volume = objects.Volume(ctxt)
volume.display_name = display_name
volume.project_id = project_id
volume.status = 'available'
volume.attach_status = 'attached'
volume.create()
return volume
def test_create_attachment(self):
req = fakes.HTTPRequest.blank('/v3/%s/attachments' %
fake.PROJECT_ID,
version=mv.NEW_ATTACH)
body = {
"attachment":
{
"connector": None,
"instance_uuid": fake.UUID1,
"volume_uuid": self.volume1.id
},
}
attachment = self.controller.create(req, body)
self.assertEqual(self.volume1.id,
attachment['attachment']['volume_id'])
self.assertEqual(fake.UUID1,
attachment['attachment']['instance'])
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_update')
def test_update_attachment(self, mock_update):
fake_connector = {'fake_key': 'fake_value'}
mock_update.return_value = fake_connector
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, self.attachment1.id),
version=mv.NEW_ATTACH,
use_admin_context=True)
body = {
"attachment":
{
"connector": {'fake_key': 'fake_value'},
},
}
attachment = self.controller.update(req, self.attachment1.id, body)
self.assertEqual(fake_connector,
attachment['attachment']['connection_info'])
self.assertEqual(fake.UUID1, attachment['attachment']['instance'])
@mock.patch.object(objects.VolumeAttachment, 'get_by_id')
def test_attachment_operations_not_authorized(self, mock_get):
mock_get.return_value = {'project_id': fake.PROJECT2_ID}
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, self.attachment1.id),
version=mv.NEW_ATTACH,
use_admin_context=False)
body = {
"attachment":
{
"connector": {'fake_key': 'fake_value'},
},
}
self.assertRaises(exception.NotAuthorized,
self.controller.update, req,
self.attachment1.id, body)
self.assertRaises(exception.NotAuthorized,
self.controller.delete, req,
self.attachment1.id)
@ddt.data(mv.get_prior_version(mv.RESOURCE_FILTER),
mv.RESOURCE_FILTER, mv.LIKE_FILTER)
@mock.patch('cinder.api.common.reject_invalid_filters')
def test_attachment_list_with_general_filter(self, version, mock_update):
url = '/v3/%s/attachments' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url,
version=version,
use_admin_context=False)
self.controller.index(req)
if version != mv.get_prior_version(mv.RESOURCE_FILTER):
support_like = True if version == mv.LIKE_FILTER else False
mock_update.assert_called_once_with(req.environ['cinder.context'],
mock.ANY, 'attachment',
support_like)
@ddt.data('reserved', 'attached')
@mock.patch.object(volume_rpcapi.VolumeAPI, 'attachment_delete')
def test_delete_attachment(self, status, mock_delete):
volume1 = self._create_volume(display_name='fake_volume_1',
project_id=fake.PROJECT_ID)
attachment = self._create_attachment(
volume_uuid=volume1.id, instance_uuid=fake.UUID1,
attach_status=status)
req = fakes.HTTPRequest.blank('/v3/%s/attachments/%s' %
(fake.PROJECT_ID, attachment.id),
version=mv.NEW_ATTACH,
use_admin_context=True)
self.controller.delete(req, attachment.id)
volume2 = objects.Volume.get_by_id(self.ctxt, volume1.id)
if status == 'reserved':
self.assertEqual('detached', volume2.attach_status)
self.assertRaises(
exception.VolumeAttachmentNotFound,
objects.VolumeAttachment.get_by_id, self.ctxt, attachment.id)
else:
self.assertEqual('attached', volume2.attach_status)
mock_delete.assert_called_once_with(req.environ['cinder.context'],
attachment.id, mock.ANY)
def _create_attachment(self, ctxt=None, volume_uuid=None,
instance_uuid=None, mountpoint=None,
attach_time=None, detach_time=None,
attach_status=None, attach_mode=None):
"""Create an attachment object."""
ctxt = ctxt or self.ctxt
attachment = objects.VolumeAttachment(ctxt)
attachment.volume_id = volume_uuid
attachment.instance_uuid = instance_uuid
attachment.mountpoint = mountpoint
attachment.attach_time = attach_time
attachment.detach_time = detach_time
attachment.attach_status = attach_status or 'reserved'
attachment.attach_mode = attach_mode
attachment.create()
return attachment
@ddt.data("instance_uuid", "volume_uuid")
def test_create_attachment_without_resource_uuid(self, resource_uuid):
req = fakes.HTTPRequest.blank('/v3/%s/attachments' %
fake.PROJECT_ID,
version=mv.NEW_ATTACH)
body = {
"attachment":
{
"connector": None
}
}
body["attachment"][resource_uuid] = "test_id"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
@ddt.data(False, True)
def test_list_attachments(self, is_detail):
url = '/v3/%s/attachments' % fake.PROJECT_ID
list_func = self.controller.index
if is_detail:
url = '/v3/%s/groups/detail' % fake.PROJECT_ID
list_func = self.controller.detail
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = list_func(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['attachments']))
self.assertEqual(self.attachment3.id,
res_dict['attachments'][0]['id'])
def test_list_attachments_with_limit(self):
url = '/v3/%s/attachments?limit=1' % fake.PROJECT_ID
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(1, len(res_dict['attachments']))
def test_list_attachments_with_marker(self):
url = '/v3/%s/attachments?marker=%s' % (fake.PROJECT_ID,
self.attachment3.id)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(2, len(res_dict['attachments']))
self.assertEqual(self.attachment2.id,
res_dict['attachments'][0]['id'])
@ddt.data("desc", "asc")
def test_list_attachments_with_sort(self, sort_dir):
url = '/v3/%s/attachments?sort_key=id&sort_dir=%s' % (fake.PROJECT_ID,
sort_dir)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(3, len(res_dict['attachments']))
order_ids = sorted([self.attachment1.id,
self.attachment2.id,
self.attachment3.id])
expect_result = order_ids[2] if sort_dir == "desc" else order_ids[0]
self.assertEqual(expect_result,
res_dict['attachments'][0]['id'])
@ddt.data({'admin': True, 'request_url': '?all_tenants=1', 'count': 4},
{'admin': False, 'request_url': '?all_tenants=1', 'count': 3},
{'admin': True, 'request_url':
'?all_tenants=1&project_id=%s' % fake.PROJECT2_ID,
'count': 1},
{'admin': False, 'request_url': '', 'count': 3},
{'admin': False, 'request_url': '?instance_id=%s' % fake.UUID1,
'count': 2},
{'admin': False, 'request_url': '?instance_id=%s' % fake.UUID2,
'count': 1})
@ddt.unpack
def test_list_attachment_with_tenants(self, admin, request_url, count):
url = '/v3/%s/attachments%s' % (fake.PROJECT_ID, request_url)
req = fakes.HTTPRequest.blank(url, version=mv.NEW_ATTACH,
use_admin_context=admin)
res_dict = self.controller.index(req)
self.assertEqual(1, len(res_dict))
self.assertEqual(count, len(res_dict['attachments']))
| apache-2.0 | -1,691,232,541,652,164,600 | 42.482639 | 78 | 0.556656 | false |
chrisspen/homebot | src/test/google_speech/vad_example.py | 1 | 3455 | #!/usr/bin/env python
import collections
import contextlib
import sys
import wave
import webrtcvad
def read_wave(path):
with contextlib.closing(wave.open(path, 'rb')) as wf:
num_channels = wf.getnchannels()
assert num_channels == 1
sample_width = wf.getsampwidth()
assert sample_width == 2
sample_rate = wf.getframerate()
assert sample_rate in (8000, 16000, 32000), 'Sample rate is: %s' % sample_rate
pcm_data = wf.readframes(wf.getnframes())
return pcm_data, sample_rate
def write_wave(path, audio, sample_rate):
with contextlib.closing(wave.open(path, 'wb')) as wf:
wf.setnchannels(1)
wf.setsampwidth(2)
wf.setframerate(sample_rate)
wf.writeframes(audio)
class Frame(object):
def __init__(self, bytes, timestamp, duration):
self.bytes = bytes
self.timestamp = timestamp
self.duration = duration
def frame_generator(frame_duration_ms, audio, sample_rate):
    # Yield successive Frames of frame_duration_ms from 16-bit mono PCM audio.
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
offset = 0
timestamp = 0.0
duration = (float(n) / sample_rate) / 2.0
while offset + n < len(audio):
yield Frame(audio[offset:offset + n], timestamp, duration)
timestamp += duration
offset += n
def vad_collector(sample_rate, frame_duration_ms,
                  padding_duration_ms, vad, frames):
    # Filter out non-voiced frames: a ring buffer holding padding_duration_ms of
    # frames starts a segment when >90% of it is voiced and ends the segment when
    # >90% is unvoiced; each voiced segment is yielded as raw PCM bytes.
num_padding_frames = int(padding_duration_ms / frame_duration_ms)
ring_buffer = collections.deque(maxlen=num_padding_frames)
triggered = False
voiced_frames = []
for frame in frames:
sys.stdout.write(
'1' if vad.is_speech(frame.bytes, sample_rate) else '0')
if not triggered:
ring_buffer.append(frame)
num_voiced = len([f for f in ring_buffer
if vad.is_speech(f.bytes, sample_rate)])
if num_voiced > 0.9 * ring_buffer.maxlen:
sys.stdout.write('+(%s)' % (ring_buffer[0].timestamp,))
triggered = True
voiced_frames.extend(ring_buffer)
ring_buffer.clear()
else:
voiced_frames.append(frame)
ring_buffer.append(frame)
num_unvoiced = len([f for f in ring_buffer
if not vad.is_speech(f.bytes, sample_rate)])
if num_unvoiced > 0.9 * ring_buffer.maxlen:
sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
triggered = False
yield b''.join([f.bytes for f in voiced_frames])
ring_buffer.clear()
voiced_frames = []
if triggered:
sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration))
sys.stdout.write('\n')
if voiced_frames:
yield b''.join([f.bytes for f in voiced_frames])
def main(args):
if len(args) != 2:
sys.stderr.write(
'Usage: example.py <aggressiveness> <path to wav file>\n')
sys.exit(1)
audio, sample_rate = read_wave(args[1])
vad = webrtcvad.Vad(int(args[0]))
frames = frame_generator(30, audio, sample_rate)
frames = list(frames)
segments = vad_collector(sample_rate, 30, 300, vad, frames)
for i, segment in enumerate(segments):
path = 'chunk-%002d.wav' % (i,)
print(' Writing %s' % (path,))
write_wave(path, segment, sample_rate)
if __name__ == '__main__':
main(sys.argv[1:])
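# Example invocation (aggressiveness 0-3; the input must be mono 16-bit PCM WAV
# at 8/16/32 kHz, per the asserts in read_wave):
#   python vad_example.py 3 recording.wav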
| mit | 2,947,711,968,839,872,500 | 33.207921 | 86 | 0.582923 | false |
staudt/everest-legacy | everest.py | 1 | 2359 | #!/usr/bin/env python
import sys
from lib import test_runner, reports
def main(testset_file, params):
config_file = 'default.cfg'
if 'config' in params.keys(): config_file = params['config']
runner = test_runner.instantiate_runner(config_file)
if not runner: return False, 'Couldn\'t load configuration file'
if 'base_url' in params.keys(): runner.base_url = params['base_url']
if 'proxy' in params.keys(): runner.proxy = params['proxy'].replace('http://', '')
worked, reason = runner.parse_testset(testset_file)
if not worked: return False, 'Failed parsing XML: %s' % reason
print 'Running Testset "%s"' % testset_file
passed = runner.run_tests(params['tags'])
report_name = None if 'output' not in params else params['output']
filenames = reports.generate(runner, testset_file, report_name)
if filenames:
print 'Test Report available at "%s"\n' % filenames
return True if passed else False, '' if passed else 'One or more testcases failed'
if __name__ == '__main__':
testset_file, params = None, {'tags' : []}
flags = { '-c' : 'config', '-o' : 'output', '-t' : 'tags',
'-base-url' : 'base_url', '-proxy' : 'proxy' }
flag = None
for a in sys.argv[1:]:
if flag:
if flag == '-t': params['tags'].append(a) #tags are more complicated
else: params[flags[flag]] = a
flag = None
else:
if a.lower() in flags.keys():
flag = a.lower()
else:
testset_file = a
if not testset_file:
print '''Welcome to EVEREST 2.0 Test Tool
ERROR: You need to inform the Testset XML file.
Usage Syntax:
python everest.py [optional parameters] <testset_file>
Optional parameters:
-c <config_file> Sets the config file to be used
-t [-]<TAG> Adds or remove a tag
-o <report_file> Sets the output report filename (no extension)
-base-url <URL> Overwrites the BASE-URL from the configuration file
-proxy <PROXY:PORT> Overwrites the PROXY from the configuration file
Examples:
python everest.py tests/test.xml (uses default config and template)
python everest.py -c default.cfg -o reports/my_report tests/test.xml
python everest.py -t smoke tests/test.xml (run tests tagged as "smoke")'''
sys.exit(-1)
else:
passed, reason = main(testset_file, params)
if not passed:
print 'Exiting with Error: %s' % reason
sys.exit(-1)
| mit | -4,124,931,762,674,862,000 | 35.444444 | 83 | 0.660873 | false |
dhanababum/dj-wkhtmltopdf | djwkhtmltopdf/url_helper.py | 1 | 2993 | import re
from django.core.exceptions import ViewDoesNotExist
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django.conf import settings
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
"""
    Return a list of views from a list of urlpatterns.
    Each object in the returned list is a three-tuple: (view_func, regex, name).
    :param urlpatterns: the url patterns to walk.
    :type urlpatterns: list
    :param base: regex prefix accumulated while recursing into resolvers.
    :type base: str or unicode
    :param namespace: namespace used to prefix view names and avoid collisions.
    :type namespace: str or unicode
    :returns: list of (view_func, regex, name) tuples
    :raise ViewDoesNotExist: if a view does not exist
    :raise ImportError: if a urlconf module cannot be imported
"""
views = []
for p in urlpatterns:
if isinstance(p, RegexURLPattern):
try:
if not p.name:
name = p.name
elif namespace:
name = '{0}:{1}'.format(namespace, p.name)
else:
name = p.name
views.append((p.callback, base + p.regex.pattern, name))
except ViewDoesNotExist:
continue
elif isinstance(p, RegexURLResolver):
try:
                patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(
patterns,
base + p.regex.pattern,
namespace=(namespace or p.namespace)))
elif hasattr(p, '_get_callback'):
try:
views.append(
(p._get_callback(),
base + p.regex.pattern,
p.name))
except ViewDoesNotExist:
continue
elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(
patterns, base + p.regex.pattern,
namespace=namespace))
else:
raise TypeError("%s does not appear to be a urlpattern object" % p)
return views
def get_all_views():
"""
Collecting all views from top level project
"""
views = []
try:
urlconf = __import__(settings.ROOT_URLCONF, {}, {}, [''])
except Exception as e:
print(e)
pass
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
for (func, regex, url_name) in view_functions:
if hasattr(func, '__name__'):
func_name = func.__name__
elif hasattr(func, '__class__'):
func_name = '%s()' % func.__class__.__name__
else:
func_name = re.sub(r' at 0x[0-9a-f]+', '', repr(func))
module = '{0}.{1}'.format(func.__module__, func_name)
views.append((module, module))
return views
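# Example (hypothetical): the (module, module) pairs returned by get_all_views()
# are shaped for direct use as Django field choices, e.g.:
#   view_choices = get_all_views()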
| bsd-2-clause | -2,542,504,369,017,222,000 | 33.802326 | 79 | 0.546275 | false |
abdelhalim/gits3 | src/gits3/amazon_s3_transport.py | 1 | 5828 | # Copyright (C) 2009 Abdelhalim Ragab <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 3
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
@author: abdelhalim
'''
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from git_config import GitConfig
import re
import os
class S3Transport(object):
URL_PATTERN = re.compile(
r'(?P<protocol>[^:]+)://'
r'(?P<config>[^@]+)@'
r'(?P<bucket>[^/]+)/'
r'(?P<prefix>.*)'
)
def __init__(self, url):
self.url = url
o = self.URL_PATTERN.match(self.url)
if o:
bucket_name = o.group('bucket')
self.prefix = o.group('prefix')
if self.prefix.endswith('/'):
self.prefix = self.prefix[:-1]
# read the jgit config file to access S3
config_file = o.group('config')
homedir = os.path.expanduser('~')
config_path = homedir + '/' + config_file
# print config_path
props = self.open_properties(config_path)
accesskey = props['accesskey']
secretkey = props['secretkey']
# print 'accesskey=',accesskey
# print 'secretkey=',secretkey
self.s3Conn = S3Connection(accesskey,secretkey)
self.bucket = self.s3Conn.get_bucket(bucket_name, False)
# print self.bucket
    def open_properties(self, properties_file):
        propFile = open(properties_file, "rU")
        propDict = dict()
        for propLine in propFile:
            propDef = propLine.strip()
            if len(propDef) == 0:
                continue
            if propDef[0] in ('!', '#'):
                continue
            punctuation = [propDef.find(c) for c in ':= '] + [len(propDef)]
            found = min([pos for pos in punctuation if pos != -1])
            name = propDef[:found].rstrip()
            value = propDef[found:].lstrip(":= ").rstrip()
            propDict[name] = value
        propFile.close()
        return propDict
def upload_pack(self, file_name):
pack_full_path = self.prefix + '/objects/pack/'
self.upload_file(pack_full_path, file_name)
def upload_file(self, prefix, file_name):
new_key = self.bucket.new_key(prefix + file_name)
new_key.set_contents_from_file(open(file_name))
new_key.set_acl('public-read')
def upload_string(self, path, contents):
key_path = self.prefix + '/' + path
key = self.bucket.get_key(key_path)
if not key:
key = self.bucket.new_key(key_path)
key.set_contents_from_string(contents)
key.set_acl('public-read')
def get_pack_names(self):
if self.bucket:
path = self.prefix + '/objects/pack'
keys = self.bucket.list(path)
packs = []
for key in keys:
if key.name.endswith('.pack'):
if key.name.startswith(path):
packs.append(key.name[len(path)+1:len(key.name)])
return packs
def get_advertised_refs(self):
refs = {}
if self.bucket:
# get loose refs
path = self.prefix + '/refs'
keys = self.bucket.list(path)
for key in keys:
name = key.name[len(self.prefix + '/'):]
s = key.get_contents_as_string()
ref = self.get_ref(s, refs)
refs[name] = {name:ref}
# read HEAD
path = self.prefix + '/HEAD'
key = self.bucket.get_key(path)
if key:
s = key.get_contents_as_string()
ref = self.get_ref(s, refs)
refs['HEAD'] = {'HEAD':ref}
return refs
def get_ref(self, s, refs):
if s.startswith('ref: '):
target = s[len('ref: '):]
target = target.strip()
try:
target_ref = refs[target]
except KeyError:
target_ref = None
if target_ref:
return target_ref[target]
return s
def create_new_repo(self, refs):
if self.bucket:
# .git/config file
config_str = '[core]\n' + '\trepositoryformatversion = 0\n'
key = self.bucket.new_key(self.prefix + '/config')
key.set_contents_from_string(config_str)
key.set_acl('public-read')
# .git/HEAD
if refs.startswith('refs/heads'):
head_str = 'ref: ' + refs + '\n'
else:
                head_str = 'ref: refs/heads/' + refs + '\n'  # 'ref:' (not 'refs:') is the git HEAD format
key = self.bucket.new_key(self.prefix + '/HEAD')
key.set_contents_from_string(head_str)
key.set_acl('public-read') | gpl-2.0 | 1,939,522,590,412,960,500 | 31.20442 | 78 | 0.498284 | false |
CoderDojoPL/minecraft-python | rzeka.py | 1 | 1568 | # -*- coding: utf-8 -*-
# Author: Wojtek Gembalczyk [email protected]
import mcpi.minecraft as minecraft
import mcpi.block as block
import time
import math
def woda3x3(x, z):
    mc.setBlocks(x, 0, z-6, x, 2, z+6, block.AIR.id)  # air above the river
    mc.setBlocks(x, 0, z-5, x, 0, z+5, block.DIRT.id)  # flood embankments
    mc.setBlocks(x, 0, z-4, x, 1, z+4, block.DIRT.id)  # flood embankments
    mc.setBlocks(x, -1, z-2, x, 1, z+2, block.WATER.id)  # water
def fragment_mostu(x, y, z):
mc.setBlocks(x-2, y, z, x+2, y+1, z, block.GLASS.id)
mc.setBlocks(x-1, y, z, x+1, y, z, block.STONE.id)
mc.setBlocks(x-1, y+1, z, x+1, y+1, z, block.AIR.id)
def most(x, z):
fragment_mostu(x, 0, z-10)
fragment_mostu(x, 1, z-9)
fragment_mostu(x, 2, z-8)
for i in range(-7, 8):
fragment_mostu(x, 3, z+i)
fragment_mostu(x, 2, z+8)
fragment_mostu(x, 1, z+9)
fragment_mostu(x, 0, z+10)
mc = minecraft.Minecraft.create()
dlugosc = 100
mosty_fr = 40
mc.setBlocks(-50, 0, 0, dlugosc+50, 50, 100, block.AIR.id)
mc.setBlocks(-50, -5, 0, dlugosc+50, -1, 100, block.DIRT.id)
time.sleep(2)
mc.setBlocks(-2, -1, 45, -1, 1, 55, block.DIRT.id)
for i in range(0, dlugosc*10, 7):
z = float(i)/100
woda3x3(int(z*10), int(math.sin(z)*7+50))
time.sleep(0.02)
    if i % (7 * mosty_fr) == 0 and i != 0:
most(int(z*10), int(math.sin(z)*7+50))
ostatni_x = int(z*10)
ostatni_z = int(math.sin(z)*7+50)
mc.setBlocks(ostatni_x-1, -1, ostatni_z-5, ostatni_x, 1, ostatni_z+5, block.DIRT.id)
| gpl-2.0 | -6,279,255,793,626,819,000 | 29.72549 | 84 | 0.596682 | false |
EventTeam/beliefs | src/beliefs/referent.py | 1 | 4770 | """ This file defines two classes, TaxonomyCell and Referent, which work in
tandem to provide the 'kind' property to all referents, and the generalization
structure. The generalization structure (a taxonomy-- a directed acyclic graph
of IS-A relationships) is automatically constructed using the object-oriented
inheritance structures of classes that inherit from Referents.
Referent is a sub-class of DictCell and contains an instance of TaxonomyCell, which
is initialized to the base class of whatever subclasses Referent.
In the file that loads all Referent subclasses, usually __init__.py, after all
classes are loaded, there must be a special call to initialize TaxonomyCell's
domain:
>> import sys
>> from beliefs.referent import *
>>
>> TaxonomyCell.initialize(sys.modules[__name__])
"""
import inspect
# ``json`` is used by cells_from_defaults()/from_defaults() below but was not
# imported here; it may have leaked in via ``from beliefs.cells import *``,
# so importing it explicitly is the safe fix.
import json
import networkx as nx
import numpy as np
from beliefs.cells import *
class TaxonomyCell(PartialOrderedCell):
""" A taxonomy of all DictCell subclasses."""
def __init__(self, initial_value=None):
if not self.has_domain(): # only initialize once
#raise Exception("TaxonomyCell.initialize(sys.modules[__name__]) must be called after importing classes")
print "initializing"
# represents IS-A relationships
PartialOrderedCell.__init__(self, None)
if initial_value:
self.merge(initial_value)
@classmethod
def initialize(clz, modules):
taxonomy = TaxonomyCell.build_class_graph(modules)
clz.set_domain(taxonomy)
@staticmethod
def build_class_graph(modules, klass=None, graph=None):
""" Builds up a graph of the DictCell subclass structure """
if klass is None:
class_graph = nx.DiGraph()
for name, classmember in inspect.getmembers(modules, inspect.isclass):
if issubclass(classmember, Referent) and classmember is not Referent:
TaxonomyCell.build_class_graph(modules, classmember, class_graph)
return class_graph
else:
parents = getattr(klass, '__bases__')
for parent in parents:
if parent != Referent:
graph.add_edge(parent.__name__, klass.__name__)
# store pointer to classes in property 'class'
graph.node[parent.__name__]['class'] = parent
graph.node[klass.__name__]['class'] = klass
if issubclass(parent, Referent):
TaxonomyCell.build_class_graph(modules, parent, graph)
class Referent(DictCell):
""" Thin DictCell subclass to inject the TaxonomyCell property after
initialization """
def __init__(self, *args, **kwargs):
DictCell.__init__(self, *args, **kwargs)
self.kind = TaxonomyCell(self.__class__.__name__)
self.num = IntervalCell(0, 100)
@classmethod
def cells_from_defaults(clz, jsonobj):
""" Creates a referent instance of type `json.kind` and
initializes it to default values.
"""
# convert strings to dicts
if isinstance(jsonobj, (str, unicode)):
jsonobj = json.loads(jsonobj)
assert 'cells' in jsonobj, "No cells in object"
domain = TaxonomyCell.get_domain()
cells = []
for num, cell_dna in enumerate(jsonobj['cells']):
assert 'kind' in cell_dna, "No type definition"
classgenerator = domain.node[cell_dna['kind']]['class']
cell = classgenerator()
cell['num'].merge(num)
for attr, val in cell_dna.items():
if not attr in ['kind']:
cell[attr].merge(val)
cells.append(cell)
return cells
@classmethod
def from_defaults(clz, defaults):
""" Given a dictionary of defaults, ie {attribute: value},
this classmethod constructs a new instance of the class and
merges the defaults"""
if isinstance(defaults, (str, unicode)):
defaults = json.loads(defaults)
c = clz()
for attribute in defaults.keys():
if attribute in c:
value = defaults[attribute]
c[attribute].merge(value)
# in case any values were not specified, attempt to merge them with
# the settings provided by clz.random()
cr = clz.random()
for attribute, value in cr:
try:
c[attribute].merge(value)
except Contradiction:
pass
return c
class Nameable(Referent):
""" A referent with a name """
def __init__(self):
Referent.__init__(self)
self.name = NameCell()
| gpl-2.0 | 2,732,122,672,072,562,700 | 36.857143 | 117 | 0.607757 | false |
dhermes/google-cloud-python | vision/tests/unit/gapic/v1/test_image_annotator_client_v1.py | 2 | 5242 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import vision_v1
from google.cloud.vision_v1.proto import image_annotator_pb2
from google.longrunning import operations_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestImageAnnotatorClient(object):
def test_batch_annotate_images(self):
# Setup Expected Response
expected_response = {}
expected_response = image_annotator_pb2.BatchAnnotateImagesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.batch_annotate_images(requests)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.BatchAnnotateImagesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_batch_annotate_images_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup request
requests = []
with pytest.raises(CustomException):
client.batch_annotate_images(requests)
def test_async_batch_annotate_files(self):
# Setup Expected Response
expected_response = {}
expected_response = image_annotator_pb2.AsyncBatchAnnotateFilesResponse(
**expected_response
)
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.async_batch_annotate_files(requests)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_async_batch_annotate_files_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.async_batch_annotate_files(requests)
exception = response.exception()
assert exception.errors[0] == error
| apache-2.0 | 2,487,649,717,542,704,600 | 32.177215 | 87 | 0.6591 | false |
leonth/private-configs | sublime-text-3/Packages/SublimePythonIDE/server/linter.py | 1 | 6132 | # -*- coding: utf-8 -*-
import sys
import os
import os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../lib"))
import _ast
import pep8
import pyflakes.checker as pyflakes
pyflakes.messages.Message.__str__ = (
lambda self: self.message % self.message_args
)
class PyflakesLoc:
""" Error location data for pyflakes.
    pyflakes 0.7 wants loc as a {lineno, col_offset} object,
    so we duck-type it here (the AST interface apparently changed
    at some point). Only the lineno attribute is required.
"""
def __init__(self, lineno):
self.lineno = lineno
class PythonLintError(pyflakes.messages.Message):
def __init__(
self, filename, loc, level, message,
message_args, offset=0, text=None):
super(PythonLintError, self).__init__(filename, PyflakesLoc(loc))
self.level = level
self.message = message
self.message_args = message_args
self.offset = offset
if text is not None:
self.text = text
class Pep8Error(PythonLintError):
def __init__(self, filename, loc, offset, code, text):
# PEP 8 Errors are downgraded to "warnings"
super(Pep8Error, self).__init__(
filename, loc, 'W', '[W] PEP 8 (%s): %s',
(code, text), offset=offset, text=text
)
class Pep8Warning(PythonLintError):
def __init__(self, filename, loc, offset, code, text):
# PEP 8 Warnings are downgraded to "violations"
super(Pep8Warning, self).__init__(
filename, loc, 'V', '[V] PEP 8 (%s): %s',
(code, text), offset=offset, text=text
)
class OffsetError(PythonLintError):
def __init__(self, filename, loc, text, offset):
super(OffsetError, self).__init__(
filename, loc, 'E', '[E] %r', (text,), offset=offset + 1, text=text
)
class PythonError(PythonLintError):
def __init__(self, filename, loc, text):
super(PythonError, self).__init__(
filename, loc, 'E', '[E] %r', (text,), text=text
)
def pyflakes_check(code, encoding, filename, ignore=None):
try:
tree = compile(code.encode(encoding), filename, "exec", _ast.PyCF_ONLY_AST)
except (SyntaxError, IndentationError) as value:
msg = value.args[0]
(lineno, offset, text) = value.lineno, value.offset, value.text
# If there's an encoding problem with the file, the text is None.
if text is None:
# Avoid using msg, since for the only known case, it contains a
# bogus message that claims the encoding the file declared was
# unknown.
if msg.startswith('duplicate argument'):
arg = msg.split('duplicate argument ', 1)[1].split(' ', 1)[0]
arg = arg.strip('\'"')
error = pyflakes.messages.DuplicateArgument(
filename, lineno, arg
)
else:
error = PythonError(filename, lineno, msg)
else:
line = text.splitlines()[-1]
if offset is not None:
offset = offset - (len(text) - len(line))
if offset is not None:
error = OffsetError(filename, lineno, msg, offset)
else:
error = PythonError(filename, lineno, msg)
return [error]
except ValueError as e:
return [PythonError(filename, 1, e.args[0])]
else:
# Okay, it's syntactically valid. Now check it.
w = pyflakes.Checker(tree, filename, builtins=ignore)
return w.messages
def pep8_check(code, filename, ignore=None, max_line_length=pep8.MAX_LINE_LENGTH):
messages = []
_lines = code.split('\n')
if _lines:
class SublimeLinterReport(pep8.BaseReport):
def error(self, line_number, offset, text, check):
"""Report an error, according to options."""
code = text[:4]
message = text[5:]
if self._ignore_code(code):
return
if code in self.counters:
self.counters[code] += 1
else:
self.counters[code] = 1
self.messages[code] = message
# Don't care about expected errors or warnings
if code in self.expected:
return
self.file_errors += 1
self.total_errors += 1
if code.startswith('E'):
messages.append(Pep8Error(
filename, line_number, offset, code, message)
)
else:
messages.append(Pep8Warning(
filename, line_number, offset, code, message)
)
return code
        _ignore = (ignore or []) + pep8.DEFAULT_IGNORE.split(',')  # ignore may be None (the default)
options = pep8.StyleGuide(
reporter=SublimeLinterReport, ignore=_ignore).options
options.max_line_length = max_line_length
good_lines = [l + '\n' for l in _lines]
good_lines[-1] = good_lines[-1].rstrip('\n')
if not good_lines[-1]:
good_lines = good_lines[:-1]
try:
pep8.Checker(filename, good_lines, options=options).check_all()
except Exception as e:
print("An exception occured when running pep8 checker: %s" % e)
return messages
def do_linting(lint_settings, code, encoding, filename):
errors = []
if lint_settings.get("pep8", True):
params = {
'ignore': lint_settings.get('pep8_ignore', []),
'max_line_length': lint_settings.get(
'pep8_max_line_length', None) or pep8.MAX_LINE_LENGTH,
}
errors.extend(pep8_check(
code, filename, **params)
)
pyflakes_ignore = lint_settings.get('pyflakes_ignore', None)
pyflakes_disabled = lint_settings.get('pyflakes_disabled', False)
if not pyflakes_disabled:
errors.extend(pyflakes_check(code, encoding, filename, pyflakes_ignore))
return errors
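# Usage sketch (keys inferred from this function; the values shown are
# illustrative -- the real ones come from the plugin's settings):
#   do_linting({"pep8": True, "pep8_ignore": ["E501"], "pep8_max_line_length": 100,
#               "pyflakes_ignore": None, "pyflakes_disabled": False},
#              code, "utf-8", "example.py")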
| mit | 9,175,192,041,530,635,000 | 29.81407 | 83 | 0.558056 | false |
dmilos/nucleotide | src/nucleotide/component/linux/gcc/atom/boost.py | 1 | 1466 | #!/usr/bin/env python2
# Copyright 2015 Dejan D. M. Milosavljevic
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import platform
import nucleotide
import nucleotide.component
import nucleotide.component.function
## TODO join this with 'boost'
atom_boost_thread = {
'platform' : {
'host' : 'X',
'guest' : 'X'
},
'cc' : {
'vendor' : 'FSF',
'name' : 'gcc',
'version': 'X'
},
'config' : {
'LINKFLAGS' : [ '-pthread', '-lboost_thread', '-lboost_system' ],
'CPPFLAGS' : [ '-fopenmp' ],
'CPPDEFINES' : [ 'BOOST_LOG_DYN_LINK' ]
},
'name' : 'boost:thread',
'class': [ 'boost:thread' ]
}
class Boost:
def __init__(self):
pass
@staticmethod
def extend( P_option ):
nucleotide.component.function.extend( P_option, 'boost:blank', atom_boost_thread )
@staticmethod
def check():
pass
| apache-2.0 | 6,499,534,218,513,792,000 | 25.178571 | 101 | 0.614598 | false |
m4sterm1nd/android | GcmDemo/appengine/gcm2.py | 1 | 14016 | ################################################################################
# gae_python_gcm/gcm.py
#
# In Python, for Google App Engine
# Originally ported from https://github.com/Instagram/node2dm
# Extended to support new GCM API.
# Greg Bayer <[email protected]>
################################################################################
from datetime import datetime, timedelta
import logging
import re
import urllib, urllib2
try:
import json
except ImportError:
import simplejson as json
from google.appengine.api import taskqueue ## Google App Engine specific
from django.utils import importlib
# Assumed fix: ``cache`` is used below (get/set/incr with Django-style
# signatures, including ValueError from incr on a missing key) but was never
# imported in this file; Django's cache object matches that API.
from django.core.cache import cache
LOCALHOST = False
GCM_CONFIG = {'gcm_api_key': 'AIzaSyCtawZWxrcAD8qdpilbz3NKKqHspNDIOGE',
# 'delete_bad_token_callback_func': 'EXAMPLE_MANAGE_TOKENS_MODULE.delete_bad_gcm_token',
# 'update_token_callback_func': 'EXAMPLE_MANAGE_TOKENS_MODULE.update_gcm_token',
}
try:
from settings import LOCALHOST, GCM_CONFIG
except:
logging.info('GCM settings module not found. Using defaults.')
pass
GOOGLE_LOGIN_URL = 'https://www.google.com/accounts/ClientLogin'
# Can't use https on localhost due to Google cert bug
GOOGLE_GCM_SEND_URL = 'http://android.googleapis.com/gcm/send' if LOCALHOST \
else 'https://android.googleapis.com/gcm/send'
GCM_QUEUE_NAME = 'gcm-retries'
GCM_QUEUE_CALLBACK_URL = '/gae_python_gcm/send_request'
# Memcache config
MEMCACHE_PREFIX = 'GCMConnection:'
# Memcached vars
RETRY_AFTER = 'retry_after'
TOTAL_ERRORS = 'total_errors'
TOTAL_MESSAGES = 'total_messages'
class GCMMessage:
device_tokens = None
notification = None
collapse_key = None
delay_while_idle = None
time_to_live = None
def __init__(self, device_tokens, notification, collapse_key=None, delay_while_idle=None, time_to_live=None):
if isinstance(device_tokens, list):
self.device_tokens = device_tokens
else:
self.device_tokens = [device_tokens]
self.notification = notification
self.collapse_key = collapse_key
self.delay_while_idle = delay_while_idle
self.time_to_live = time_to_live
def __unicode__(self):
return "%s:%s:%s:%s:%s" % (repr(self.device_tokens), repr(self.notification), repr(self.collapse_key), repr(self.delay_while_idle), repr(self.time_to_live))
def json_string(self):
if not self.device_tokens or not isinstance(self.device_tokens, list):
logging.error('GCMMessage generate_json_string error. Invalid device tokens: ' + repr(self))
raise Exception('GCMMessage generate_json_string error. Invalid device tokens.')
json_dict = {}
json_dict['registration_ids'] = self.device_tokens
# If message is a dict, send each key individually
# Else, send entire message under data key
if isinstance(self.notification, dict):
json_dict['data'] = self.notification
else:
json_dict['data'] = {'data': self.notification}
if self.collapse_key:
json_dict['collapse_key'] = self.collapse_key
if self.delay_while_idle:
json_dict['delay_while_idle'] = self.delay_while_idle
if self.time_to_live:
json_dict['time_to_live'] = self.time_to_live
json_str = json.dumps(json_dict)
return json_str
# Instantiate to send GCM message. No initialization required.
class GCMConnection:
################################ Config ###############################
# settings.py
#
# GCM_CONFIG = {'gcm_api_key': '',
# 'delete_bad_token_callback_func': lambda x: x,
# 'update_token_callback_func': lambda x: x}
##############################################################################
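    # Usage sketch (names from this module; the token and payload are illustrative):
    #   message = GCMMessage(['DEVICE_TOKEN'], {'alert': 'hello'}, collapse_key='demo')
    #   GCMConnection().notify_device(message)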
# Call this to send a push notification
def notify_device(self, message, deferred=False):
self._incr_memcached(TOTAL_MESSAGES, 1)
self._submit_message(message, deferred=deferred)
##### Public Utils #####
def debug(self, option):
if option == "help":
return "Commands: help stats\n"
elif option == "stats":
output = ''
# resp += "uptime: " + elapsed + " seconds\n"
output += "messages_sent: " + str(self._get_memcached(TOTAL_MESSAGES)) + "\n"
# resp += "messages_in_queue: " + str(self.pending_messages.length) + "\n"
output += "backing_off_retry_after: " + str(self._get_memcached(RETRY_AFTER)) + "\n"
output += "total_errors: " + str(self._get_memcached(TOTAL_ERRORS)) + "\n"
return output
else:
return "Invalid command\nCommands: help stats\n"
##### Hooks - Override to change functionality #####
def delete_bad_token(self, bad_device_token):
logging.info('delete_bad_token(): ' + repr(bad_device_token))
if 'delete_bad_token_callback_func' in GCM_CONFIG:
bad_token_callback_func_path = GCM_CONFIG['delete_bad_token_callback_func']
mod_path, func_name = bad_token_callback_func_path.rsplit('.', 1)
mod = importlib.import_module(mod_path)
logging.info('delete_bad_token_callback_func: ' + repr((mod_path, func_name, mod)))
bad_token_callback_func = getattr(mod, func_name)
bad_token_callback_func(bad_device_token)
def update_token(self, old_device_token, new_device_token):
logging.info('update_token(): ' + repr((old_device_token, new_device_token)))
if 'update_token_callback_func' in GCM_CONFIG:
bad_token_callback_func_path = GCM_CONFIG['update_token_callback_func']
mod_path, func_name = bad_token_callback_func_path.rsplit('.', 1)
mod = importlib.import_module(mod_path)
logging.info('update_token_callback_func: ' + repr((mod_path, func_name, mod)))
bad_token_callback_func = getattr(mod, func_name)
bad_token_callback_func(old_device_token, new_device_token)
# Currently unused
def login_complete(self):
# Retries are handled by the gae task queue
# self.retry_pending_messages()
pass
##### Helper functions #####
def _gcm_connection_memcache_key(self, variable_name):
return 'GCMConnection:' + variable_name
def _get_memcached(self, variable_name):
memcache_key = self._gcm_connection_memcache_key(variable_name)
return cache.get(memcache_key)
def _set_memcached(self, variable_name, value, timeout=None):
memcache_key = self._gcm_connection_memcache_key(variable_name)
return cache.set(memcache_key, value, timeout=timeout)
def _incr_memcached(self, variable_name, increment):
memcache_key = self._gcm_connection_memcache_key(variable_name)
try:
return cache.incr(memcache_key, increment)
except ValueError:
return cache.set(memcache_key, increment)
# Add message to queue
def _requeue_message(self, message):
taskqueue.add(queue_name=GCM_QUEUE_NAME, url=GCM_QUEUE_CALLBACK_URL, params={'device_token': message.device_tokens, 'collapse_key': message.collapse_key, 'notification': message.notification})
# If send message now or add it to the queue
def _submit_message(self, message, deferred=False):
if deferred:
self._requeue_message(message)
else:
self._send_request(message)
# Try sending message now
def _send_request(self, message):
if message.device_tokens == None or message.notification == None:
logging.error('Message must contain device_tokens and notification.')
return False
# Check for resend_after
retry_after = self._get_memcached(RETRY_AFTER)
if retry_after != None and retry_after > datetime.now():
logging.warning('RETRY_AFTER: ' + repr(retry_after) + ', requeueing message: ' + repr(message))
self._requeue_message(message)
return
# Build request
headers = {
'Authorization': 'key=' + GCM_CONFIG['gcm_api_key'],
'Content-Type': 'application/json'
}
gcm_post_json_str = ''
try:
gcm_post_json_str = message.json_string()
except:
logging.exception('Error generating json string for message: ' + repr(message))
return
logging.info('Sending gcm_post_body: ' + repr(gcm_post_json_str))
request = urllib2.Request(GOOGLE_GCM_SEND_URL, gcm_post_json_str, headers)
# Post
try:
resp = urllib2.urlopen(request)
resp_json_str = resp.read()
resp_json = json.loads(resp_json_str)
logging.info('_send_request() resp_json: ' + repr(resp_json))
# multicast_id = resp_json['multicast_id']
# success = resp_json['success']
failure = resp_json['failure']
canonical_ids = resp_json['canonical_ids']
results = resp_json['results']
# If the value of failure and canonical_ids is 0, it's not necessary to parse the remainder of the response.
if failure == 0 and canonical_ids == 0:
# Success, nothing to do
return
else:
# Process result messages for each token (result index matches original token index from message)
result_index = 0
for result in results:
if 'message_id' in result and 'registration_id' in result:
# Update device token
try:
old_device_token = message.device_tokens[result_index]
new_device_token = result['registration_id']
self.update_token(old_device_token, new_device_token)
except:
logging.exception('Error updating device token')
return
elif 'error' in result:
# Handle GCM error
error_msg = result.get('error')
try:
device_token = message.device_tokens[result_index]
self._on_error(device_token, error_msg, message)
except:
logging.exception('Error handling GCM error: ' + repr(error_msg))
return
result_index += 1
except urllib2.HTTPError, e:
self._incr_memcached(TOTAL_ERRORS, 1)
if e.code == 400:
logging.error('400, Invalid GCM JSON message: ' + repr(gcm_post_json_str))
elif e.code == 401:
logging.error('401, Error authenticating with GCM. Retrying message. Might need to fix auth key!')
self._requeue_message(message)
elif e.code == 500:
logging.error('500, Internal error in the GCM server while trying to send message: ' + repr(gcm_post_json_str))
elif e.code == 503:
                # ``resp`` is unbound when urlopen() raised, so read the header
                # off the HTTPError itself; default to 10s if it is missing.
                retry_seconds = int(e.headers.get('Retry-After') or 10)
logging.error('503, Throttled. Retry after delay. Requeuing message. Delay in seconds: ' + str(retry_seconds))
retry_timestamp = datetime.now() + timedelta(seconds=retry_seconds)
self._set_memcached(RETRY_AFTER, retry_timestamp)
self._requeue_message(message)
else:
logging.exception('Unexpected HTTPError: ' + str(e.code) + " " + e.msg + " " + e.read())
def _on_error(self, device_token, error_msg, message):
self._incr_memcached(TOTAL_ERRORS, 1)
if error_msg == "MissingRegistration":
logging.error('ERROR: GCM message sent without device token. This should not happen!')
elif error_msg == "InvalidRegistration":
self.delete_bad_token(device_token)
elif error_msg == "MismatchSenderId":
logging.error('ERROR: Device token is tied to a different sender id: ' + repr(device_token))
self.delete_bad_token(device_token)
elif error_msg == "NotRegistered":
self.delete_bad_token(device_token)
elif error_msg == "MessageTooBig":
logging.error("ERROR: GCM message too big (max 4096 bytes).")
elif error_msg == "InvalidTtl":
logging.error("ERROR: GCM Time to Live field must be an integer representing a duration in seconds between 0 and 2,419,200 (4 weeks).")
elif error_msg == "MessageTooBig":
logging.error("ERROR: GCM message too big (max 4096 bytes).")
elif error_msg == "Unavailable":
retry_seconds = 10
logging.error('ERROR: GCM Unavailable. Retry after delay. Requeuing message. Delay in seconds: ' + str(retry_seconds))
retry_timestamp = datetime.now() + timedelta(seconds=retry_seconds)
self._set_memcached(RETRY_AFTER, retry_timestamp)
self._requeue_message(message)
elif error_msg == "InternalServerError":
logging.error("ERROR: Internal error in the GCM server while trying to send message: " + repr(message))
else:
logging.error("Unknown error: %s for device token: %s" % (repr(error_msg), repr(device_token)))
| gpl-2.0 | -7,267,360,330,235,582,000 | 39.744186 | 201 | 0.573202 | false |
DayGitH/Python-Challenges | DailyProgrammer/DP20140811A.py | 1 | 1168 | """
[8/11/2014] Challenge #175 [Easy] Bogo!
https://www.reddit.com/r/dailyprogrammer/comments/2d8yk5/8112014_challenge_175_easy_bogo/
#Description
A bogo sort is a purposefully inefficient algorithm for sorting a sequence. Today we will be using this for strings to
test for equality.
Here is wikipedias entry for a [Bogo-Sort](http://en.wikipedia.org/wiki/Bogo-sort)
#Inputs & Outputs
Given a scrambled string N and another string M. You must sort N so that it matches M. After it has been sorted, it
must output how many iterations it took to complete the sorting.
##Sample Inputs & Outputs
Input:
Bogo("lolhe","Hello")
Output:
1456 iterations
#Bonus
For a bit of fun, the LEAST efficient algorithm wins. Check out the bogo-bogo sort, an algorithm that's designed not to
succeed before the heat death of the universe
http://www.dangermouse.net/esoteric/bogobogosort.html
If you have designed an algorithm but it still hasn't finished sorting, if you can prove it WILL sort, you may post
your proof.
#Notes
Have an idea for a challenge?
Consider submitting it to /r/dailyprogrammer_ideas
"""
def main():
    print("%d iterations" % bogo("lolhe", "Hello"))
if __name__ == "__main__":
main()
| mit | -7,608,320,504,263,199,000 | 32.371429 | 119 | 0.751712 | false |
yfede/gimp-plugin-export-layers | export_layers/pylibgimpplugin/tests/test_progress.py | 1 | 3089 | #-------------------------------------------------------------------------------
#
# This file is part of pylibgimpplugin.
#
# Copyright (C) 2014 khalim19 <[email protected]>
#
# pylibgimpplugin is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pylibgimpplugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pylibgimpplugin. If not, see <http://www.gnu.org/licenses/>.
#
#-------------------------------------------------------------------------------
#===============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
str = unicode
#===============================================================================
import unittest
from .. import progress
#===============================================================================
class MockProgressBar(object):
def __init__(self):
self.text = ""
self.fraction = 0.0
class MockProgressUpdater(progress.ProgressUpdater):
def _update_progress_bar(self):
self.progress_bar.fraction = self._num_finished_tasks / self.num_total_tasks
def _set_text_progress_bar(self, text):
self.progress_bar.text = text
#===============================================================================
class TestProgressUpdater(unittest.TestCase):
def setUp(self):
self.num_total_tasks = 10
self.progress_bar = MockProgressBar()
self.progress_updater = MockProgressUpdater(self.progress_bar, num_total_tasks=10)
def test_update_tasks(self):
self.progress_updater.update_tasks(self.num_total_tasks / 2)
self.assertEqual(self.progress_updater.num_finished_tasks, self.num_total_tasks / 2)
self.progress_updater.update_tasks(2)
self.assertEqual(self.progress_updater.num_finished_tasks, self.num_total_tasks / 2 + 2)
def test_update_text(self):
self.progress_updater.update_text("Hi there")
self.assertEqual(self.progress_updater.progress_bar.text, "Hi there")
self.progress_updater.update_text(None)
self.assertEqual(self.progress_updater.progress_bar.text, "")
def test_update_with_num_finished_tasks_greater_than_num_tasks(self):
with self.assertRaises(ValueError):
self.progress_updater.update_tasks(self.num_total_tasks + 1)
def test_reset(self):
self.progress_updater.update_text("Hi there")
self.progress_updater.update_tasks(2)
self.progress_updater.reset()
self.assertEqual(self.progress_updater.num_finished_tasks, 0)
self.assertEqual(self.progress_updater.progress_bar.text, "")
| gpl-3.0 | -2,251,561,893,102,036,700 | 34.505747 | 92 | 0.617676 | false |
cedriclaunay/gaffer | python/GafferImageTest/MergeTest.py | 1 | 6664 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import GafferImage
import os
class MergeTest( unittest.TestCase ) :
rPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/redWithDataWindow.100x100.exr" )
gPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/greenWithDataWindow.100x100.exr" )
bPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/blueWithDataWindow.100x100.exr" )
checkerPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/checkerboard.100x100.exr" )
checkerRGBPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/rgbOverChecker.100x100.exr" )
rgbPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/rgb.100x100.exr" )
# Do several tests to check the cache is working correctly:
def testHashes( self ) :
r1 = GafferImage.ImageReader()
r1["fileName"].setValue( self.checkerPath )
r2 = GafferImage.ImageReader()
r2["fileName"].setValue( self.gPath )
##########################################
# Test to see if the hash changes.
##########################################
merge = GafferImage.Merge()
merge["operation"].setValue(8) # 8 is the Enum value of the over operation.
merge["in"].setInput(r1["out"])
merge["in1"].setInput(r2["out"])
h1 = merge["out"].image().hash()
# Switch the inputs.
merge["in1"].setInput(r1["out"])
merge["in"].setInput(r2["out"])
h2 = merge["out"].image().hash()
self.assertNotEqual( h1, h2 )
##########################################
# Test to see if the hash remains the same
# when the output should be the same but the
# input plugs used are not.
##########################################
merge = GafferImage.Merge()
merge["operation"].setValue(8) # 8 is the Enum value of the over operation.
expectedHash = h1
# Connect up a load of inputs ...
merge["in"].setInput(r1["out"])
merge["in1"].setInput(r1["out"])
merge["in2"].setInput(r1["out"])
merge["in3"].setInput(r2["out"])
# but then disconnect two so that the result should still be the same...
merge["in"].setInput( None )
merge["in2"].setInput( None )
h1 = merge["out"].image().hash()
self.assertEqual( h1, expectedHash )
def testHashPassThrough( self ) :
r1 = GafferImage.ImageReader()
r1["fileName"].setValue( self.checkerPath )
##########################################
# Test to see if the input has is always passed
# through if only the first input is connected.
##########################################
merge = GafferImage.Merge()
merge["operation"].setValue(8) # 8 is the Enum value of the over operation.
expectedHash = r1["out"].image().hash()
merge["in"].setInput(r1["out"])
h1 = merge["out"].image().hash()
self.assertEqual( h1, expectedHash )
##########################################
# Test that if we disable the node the hash gets passed through.
##########################################
merge["enabled"].setValue(False)
h1 = merge["out"].image().hash()
self.assertEqual( h1, expectedHash )
# Overlay a red, green and blue tile of different data window sizes and check the data window is expanded on the result and looks as we expect.
def testOverRGBA( self ) :
r = GafferImage.ImageReader()
r["fileName"].setValue( self.rPath )
g = GafferImage.ImageReader()
g["fileName"].setValue( self.gPath )
b = GafferImage.ImageReader()
b["fileName"].setValue( self.bPath )
merge = GafferImage.Merge()
merge["operation"].setValue(8) # 8 is the Enum value of the over operation.
merge["in"].setInput(r["out"])
merge["in1"].setInput(g["out"])
merge["in2"].setInput(b["out"])
mergeResult = merge["out"].image()
expected = IECore.Reader.create( self.rgbPath ).read()
self.assertTrue( not IECore.ImageDiffOp()( imageA = expected, imageB = mergeResult, skipMissingChannels = False, maxError = 0.001 ).value )
	# Overlay red, green and blue tiles of different data window sizes onto an RGB checkerboard and check the merged result matches the expected image.
def testOverRGBAonRGB( self ) :
c = GafferImage.ImageReader()
c["fileName"].setValue( self.checkerPath )
r = GafferImage.ImageReader()
r["fileName"].setValue( self.rPath )
g = GafferImage.ImageReader()
g["fileName"].setValue( self.gPath )
b = GafferImage.ImageReader()
b["fileName"].setValue( self.bPath )
merge = GafferImage.Merge()
merge["operation"].setValue(8) # 8 is the Enum value of the over operation.
merge["in"].setInput(c["out"])
merge["in1"].setInput(r["out"])
merge["in2"].setInput(g["out"])
merge["in3"].setInput(b["out"])
mergeResult = merge["out"].image()
expected = IECore.Reader.create( self.checkerRGBPath ).read()
self.assertTrue( not IECore.ImageDiffOp()( imageA = expected, imageB = mergeResult, skipMissingChannels = False, maxError = 0.001 ).value )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 524,376,069,707,290,940 | 36.438202 | 144 | 0.656212 | false |
softcert/vsroom | vsroom/common/historian4.py | 1 | 10451 | import os
import time
import math
import errno
import marshal
import collections
import idiokit
from idiokit import timer, xmlcore
from idiokit.xmpp.jid import JID
from abusehelper.core import bot, events, taskfarm, services
from vsroom.common import eventdb
NS = "vsr#historian"
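# Protocol sketch (inferred from parse() below): clients send <message> stanzas
# carrying a child element in this namespace named start/load/histogram/cancel,
# whose text is a JSON object with at least an "id"; the bot answers with
# <dump> and <histogram> elements in the same namespace.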
try:
import json
JSONDecodeError = ValueError
except ImportError:
import simplejson as json
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
def current_time(multiplier=10**6):
return time.time() * multiplier
def is_valid(event):
"""Return whether an event contains values for keys other than "id".
>>> event = events.Event()
>>> is_valid(event)
False
>>> event.add("id", "1")
>>> is_valid(event)
False
>>> event.add("other", "2")
>>> is_valid(event)
True
"""
contains_other = False
for key in event.keys():
if key != "id":
contains_other = True
break
return event.contains("id") and contains_other
class Timeout(Exception):
pass
class EventDB(object):
def __init__(self, filename):
self.db = eventdb.Writer(filename)
self.jids = dict()
self.ids = dict()
def commit(self):
self.db.commit(current_time())
def close(self):
self.db.close(current_time())
def add_event(self, jid, event):
timestamp = current_time()
ids = event.values("id")
if not ids:
obj = dict((x, list(event.values(x))) for x in event.keys())
self.db.append_obj(timestamp, timestamp, marshal.dumps(obj))
return
for id in ids:
copy = events.Event(event)
copy.clear("id")
copy.add("id", id)
if is_valid(copy):
obj = dict((x, list(copy.values(x))) for x in copy.keys())
self._open(timestamp, id, jid, marshal.dumps(obj))
else:
self._close(timestamp, id)
def purge_jid(self, jid):
ids = list(self.jids.get(jid, ()))
for id in ids:
self._close(current_time(), id)
def _close(self, timestamp, id):
if id in self.ids:
jid = self.ids.pop(id)
ids = self.jids.get(jid, set())
ids.discard(id)
if not ids:
self.jids.pop(jid, None)
self.db.set_obj(timestamp, id, None)
def _open(self, timestamp, id, jid, obj):
self._close(timestamp, id)
self.ids[id] = jid
self.jids.setdefault(jid, set()).add(id)
self.db.set_obj(timestamp, id, obj)
def query(self, start=None, end=None):
start = None if start is None else start * 10**6
end = None if end is None else end * 10**6
self.commit()
for start, end, obj in self.db.query(start, end):
start = int(start * 10**-6)
end = None if end is None else int(end * 10**-6)
yield start, end, marshal.loads(obj)
def histogram(self, id, h_start, h_end, step):
self.commit()
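        # Round the step up to a power of two and snap the window to it, then do
        # a sweep-line count: +1 at each interval start, -1 past each end; a
        # running sum over the sorted times yields the live count per bucket.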
step = 2**max(0, int(math.ceil(math.log(step, 2))))
h_start = step * math.floor(h_start / step)
h_end = step * math.ceil(h_end / step)
deltas = dict()
deltas[h_start] = 0
for start, end, obj in self.db.query(h_start * 10**6, h_end * 10**6):
start *= 10**-6
start -= start % step
deltas[start] = deltas.get(start, 0) + 1
if end is not None:
end *= 10**-6
end += step - end % step
deltas[end] = deltas.get(end, 0) - 1
data = list()
count = 0
for time, delta in sorted(deltas.items()):
count += delta
if h_start <= time < h_end:
if not data or data[-1]["value"] != count:
data.append(dict(offset=int(time-h_start), value=count))
result = dict(id=id, start=h_start, end=h_end, step=step, values=data)
element = xmlcore.Element("histogram", xmlns=NS)
element.text = json.dumps(result)
return element
class QuerySet(object):
def __init__(self):
self.ids = dict()
def __nonzero__(self):
for query, amounts in self.ids.itervalues():
if amounts:
return True
return False
def start(self, jid, id, query):
self.ids[(jid, id)] = query, collections.deque()
def load(self, jid, id, size):
if (jid, id) in self.ids:
query, sizes = self.ids[(jid, id)]
sizes.append(size)
    def discard_jid(self, discarded_jid):
        for (jid, id) in list(self.ids):
            if jid == discarded_jid:
                del self.ids[(jid, id)]
    def cancel(self, jid, id):
        # parse() below calls queries.cancel(); without this method every
        # "cancel" request would raise AttributeError.
        self.ids.pop((jid, id), None)
def __iter__(self):
for (jid, id), (query, sizes) in list(self.ids.iteritems()):
if not sizes:
continue
events = list()
result = dict(id=id)
while sizes[0] > 0 and len(events) < 10:
sizes[0] -= 1
try:
start, end, event_dict = query.next()
except StopIteration:
result.update(done=True)
del self.ids[(jid, id)]
break
else:
event_info = dict(start=start, event=event_dict)
if end is not None:
event_info.update(end=end)
events.append(event_info)
result.update(events=events)
result.update(remains=sizes[0])
if sizes[0] <= 0:
sizes.popleft()
element = xmlcore.Element("dump", xmlns=NS)
element.text = json.dumps(result)
yield jid, element
class HistorianService(bot.ServiceBot):
bot_state_file = bot.Param()
def __init__(self, bot_state_file=None, **keys):
bot.ServiceBot.__init__(self, bot_state_file=None, **keys)
self.rooms = taskfarm.TaskFarm(self.handle_room)
self.db_dir = bot_state_file
try:
os.makedirs(self.db_dir)
except OSError, ose:
if errno.EEXIST != ose.errno:
raise ose
@idiokit.stream
def session(self, state, src_room):
try:
yield self.rooms.inc(src_room)
except services.Stop:
idiokit.stop()
@idiokit.stream
def handle_room(self, name):
db_file = os.path.join(self.db_dir, name)
db = EventDB(db_file)
try:
self.log.info("Joining room %r", name)
room = yield self.xmpp.muc.join(name, self.bot_name)
self.log.info("Joined room %r", name)
try:
yield room | self.parse(db) | self.commit(db)
finally:
self.log.info("Left room %r", name)
finally:
db.close()
@idiokit.stream
def _timeout(self, timeout):
yield timer.sleep(timeout)
raise Timeout()
@idiokit.stream
def parse(self, db):
queries = QuerySet()
while True:
next = idiokit.next()
if queries:
idiokit.pipe(self._timeout(0.0), next)
try:
elements = yield next
except Timeout:
pass
else:
for element in elements.with_attrs("from"):
sender = JID(element.get_attr("from"))
if element.named("presence").with_attrs(type="unavailable"):
db.purge_jid(sender)
queries.discard_jid(sender)
for message in element.named("message"):
if not message.with_attrs(type="groupchat"):
continue
for event in events.Event.from_elements(message):
db.add_event(sender, event)
for query in element.named("message").children(ns=NS):
try:
args = json.loads(query.text)
except JSONDecodeError:
self.log.error("Invalid query data from %r: %r",
sender, query.text)
continue
if "id" not in args:
self.log.error("Query without an ID from %r: %r",
sender, args)
continue
id = args.get("id")
if query.named("start"):
start = args.get("start", None)
end = args.get("end", None)
queries.start(sender, id, db.query(start, end))
self.log.info("Start from %r: %r", sender, args)
elif query.named("load"):
if "size" in args:
queries.load(sender, id, args.get("size"))
self.log.debug("Load from %r: %r", sender, args)
else:
self.log.error("Load without an ID from %r: %r",
sender, args)
elif query.named("histogram"):
start = args.get("start", None)
end = args.get("end", None)
step = args.get("step", None)
if None not in (start, end, step):
element = db.histogram(id, start, end, step)
self.xmpp.core.message(sender, element)
self.log.debug("Histogram from %r: %r",
sender, args)
elif query.named("cancel"):
queries.cancel(sender, id)
self.log.info("Cancel from %r: %r", sender, args)
for sender, element in queries:
yield self.xmpp.core.message(sender, element)
@idiokit.stream
def commit(self, db, commit_interval=1.0):
while True:
yield timer.sleep(commit_interval)
db.commit()
if __name__ == "__main__":
HistorianService.from_command_line().execute()
| mit | 4,675,445,567,437,754,000 | 31.557632 | 80 | 0.48847 | false |
mesutcang/mongodb-word2vec-doc2vec | main.py | 1 | 1814 | # -*- encoding: utf-8 -*-
from glob import glob
from pymongo import MongoClient
from gensim import models
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
def fillMongo(db):
"""
    Takes the MongoDB database handle and fills the ``deneme`` collection
    with one document per text file found under the working directory.
"""
for index, file in enumerate(glob('./**/*.txt',recursive=True)):
db.deneme.insert_one(
{
"id" : index + 1,
"filename" : file,
"class" : file.split("/")[-2],
"text" : open(file, encoding="iso-8859-9").read().strip()
})
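# Each stored document then looks like (illustrative values):
#   {"id": 1, "filename": "./sport/001.txt", "class": "sport", "text": "..."}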
def mongoDocumentsSplitted( db ):
    # Word2Vec expects an iterable of sentences, each a *list of tokens*, so
    # collect one token list per document instead of one flat token stream.
    splitted_records = []
    for record in db["deneme"].find():
        splitted_records.append( record["text"].split() )
    return splitted_records
def mongoDocuments2Sentences( db ):
sentences = []
for record in db["deneme"].find():
        # LabeledSentence expects ``tags`` to be a list of labels, not a bare string.
        sentence = models.doc2vec.LabeledSentence( words = record["text"].split(), tags = [record["class"]] )
sentences.append(sentence)
return sentences
def main():
"""
Main application execution.
"""
db = MongoClient('localhost', 27017).test
fillMongo(db)
sentences = mongoDocumentsSplitted(db)
w2v_model = models.Word2Vec(sentences, workers=4)
w2v_model.save("word2vec.bin")
d2v_model = models.Doc2Vec( mongoDocuments2Sentences( db ), workers=4 )
d2v_model.save("doc2vec.bin")
random_records = db.deneme.aggregate( [ { "$sample": {"size": 10} } ] )
    infer_vectors = []
    vectors = []
    for record in random_records:
        vectors.append(record["text"])
        # reshape(1, -1) marks the embedding as a single sample, so
        # cosine_similarity() below returns a 1x1 matrix (a scalar-like value).
        infer_vectors.append(np.array(d2v_model.infer_vector(record['text'].split(), alpha=0.025, min_alpha=0.025, steps=20)).reshape(1, -1))
for i in range(len(infer_vectors)-1):
print("vector1: ", vectors[i])
print("vector2: ", vectors[i+1])
print("cosine: ", cosine_similarity(infer_vectors[i], infer_vectors[i+1])) # Print out = ~0.00795774
if __name__ == "__main__":
main()
| gpl-3.0 | 2,473,236,960,632,347,600 | 26.074627 | 135 | 0.675854 | false |
HewlettPackard/python-hpOneView | tests/unit/resources/networking/test_connection_templates.py | 2 | 3312 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import unittest
import mock
from hpOneView.connection import connection
from hpOneView.resources.networking.connection_templates import ConnectionTemplates
from hpOneView.resources.resource import ResourceClient
class ConnectionTemplatesTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self._connection_templates = ConnectionTemplates(self.connection)
@mock.patch.object(ResourceClient, 'get')
def test_get_called_once(self, mock_get):
self._connection_templates.get('7a9f7d09-3c24-4efe-928f-50a1af411120')
mock_get.assert_called_once_with(
'7a9f7d09-3c24-4efe-928f-50a1af411120')
@mock.patch.object(ResourceClient, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._connection_templates.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort)
@mock.patch.object(ResourceClient, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._connection_templates.get_by(
'name', 'name1128673347-1465916352647')
mock_get_by.assert_called_once_with(
'name', 'name1128673347-1465916352647')
@mock.patch.object(ResourceClient, 'get')
def test_get_default_called_once(self, mock_get):
self._connection_templates.get_default()
uri = '/rest/connection-templates/defaultConnectionTemplate'
mock_get.assert_called_once_with(uri)
@mock.patch.object(ResourceClient, 'update')
def test_update_called_once(self, mock_update):
con_template = {
"type": "connection-templates",
"bandwidth": {
"maximumBandwidth": 10000,
"typicalBandwidth": 2000
},
"name": "CT-23"
}
self._connection_templates.update(con_template, 70)
mock_update.assert_called_once_with(con_template, timeout=70,
default_values=self._connection_templates.DEFAULT_VALUES)
| mit | -7,821,508,190,935,121,000 | 39.390244 | 101 | 0.694143 | false |
whyflyru/django-cacheops | cacheops/query.py | 1 | 20852 | # -*- coding: utf-8 -*-
import sys
import json
import six
from funcy import select_keys, cached_property, once, once_per, monkey, wraps
from funcy.py2 import mapcat, map
from .cross import pickle, md5
import django
from django.utils.encoding import smart_str
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
try:
from django.db.models.query import MAX_GET_RESULTS
except ImportError:
MAX_GET_RESULTS = None
from .conf import model_profile, redis_client, handle_connection_failure, LRU, ALL_OPS
from .utils import monkey_mix, get_model_name, stamp_fields, load_script, \
func_cache_key, cached_view_fab, get_thread_id, family_has_profile
from .tree import dnfs
from .invalidation import invalidate_obj, invalidate_dict
__all__ = ('cached_as', 'cached_view_as', 'install_cacheops')
_local_get_cache = {}
@handle_connection_failure
def cache_thing(cache_key, data, cond_dnfs, timeout):
"""
Writes data to cache and creates appropriate invalidators.
"""
load_script('cache_thing', LRU)(
keys=[cache_key],
args=[
pickle.dumps(data, -1),
json.dumps(cond_dnfs, default=str),
timeout
]
)
def cached_as(*samples, **kwargs):
"""
    Caches results of a function and invalidates them the same way as the given queryset(s).
    NOTE: Ignores the querysets' cached ops settings, just caches.
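    Usage sketch (assumes an ``Article`` model with cacheops enabled):
        @cached_as(Article.objects.filter(published=True), timeout=60)
        def published_count():
            return Article.objects.filter(published=True).count()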
"""
timeout = kwargs.get('timeout')
extra = kwargs.get('extra')
key_func = kwargs.get('key_func', func_cache_key)
    # If we unexpectedly get a list instead of a queryset, return an identity decorator.
    # Paginator could do this when page.object_list is empty.
    # TODO: think of a better way of doing this.
if len(samples) == 1 and isinstance(samples[0], list):
return lambda func: func
def _get_queryset(sample):
if isinstance(sample, Model):
queryset = sample.__class__.objects.inplace().filter(pk=sample.pk)
elif isinstance(sample, type) and issubclass(sample, Model):
queryset = sample.objects.all()
else:
queryset = sample
queryset._require_cacheprofile()
return queryset
querysets = map(_get_queryset, samples)
cond_dnfs = mapcat(dnfs, querysets)
key_extra = [qs._cache_key() for qs in querysets]
key_extra.append(extra)
if not timeout:
timeout = min(qs._cacheconf['timeout'] for qs in querysets)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
cache_key = 'as:' + key_func(func, args, kwargs, key_extra)
cache_data = redis_client.get(cache_key)
if cache_data is not None:
return pickle.loads(cache_data)
result = func(*args, **kwargs)
cache_thing(cache_key, result, cond_dnfs, timeout)
return result
return wrapper
return decorator
def cached_view_as(*samples, **kwargs):
return cached_view_fab(cached_as)(*samples, **kwargs)
class QuerySetMixin(object):
@cached_property
def _cacheprofile(self):
profile = model_profile(self.model)
if profile:
self._cacheconf = profile.copy()
self._cacheconf['write_only'] = False
return profile
@cached_property
def _cloning(self):
return 1000
def _require_cacheprofile(self):
if self._cacheprofile is None:
raise ImproperlyConfigured(
'Cacheops is not enabled for %s.%s model.\n'
'If you don\'t want to cache anything by default '
'you can configure it with empty ops.'
% (self.model._meta.app_label, get_model_name(self.model)))
def _cache_key(self, extra=''):
"""
Compute a cache key for this queryset
"""
md = md5()
md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
# Vary cache key for proxy models
md.update('%s.%s' % (self.model.__module__, self.model.__name__))
# Protect from field list changes in model
md.update(stamp_fields(self.model))
# Use query SQL as part of a key
try:
sql, params = self.query.get_compiler(self._db or DEFAULT_DB_ALIAS).as_sql()
try:
sql_str = sql % params
except UnicodeDecodeError:
sql_str = sql % map(smart_str, params)
md.update(smart_str(sql_str))
except EmptyResultSet:
pass
# If query results differ depending on database
if self._cacheprofile and not self._cacheprofile['db_agnostic']:
md.update(self.db)
if extra:
md.update(str(extra))
# This attribute only appeared in Django 1.8 and was renamed in Django 1.9
it_class = getattr(self, '_iterator_class', None) or getattr(self, '_iterable_class', None)
if it_class:
md.update('%s.%s' % (it_class.__module__, it_class.__name__))
# 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
if hasattr(self, 'flat'):
md.update(str(self.flat))
return 'q:%s' % md.hexdigest()
def _cache_results(self, cache_key, results):
cond_dnfs = dnfs(self)
cache_thing(cache_key, results, cond_dnfs, self._cacheconf['timeout'])
def cache(self, ops=None, timeout=None, write_only=None):
"""
Enables caching for given ops
ops - a subset of {'get', 'fetch', 'count', 'exists'},
ops caching to be turned on, all enabled by default
timeout - override default cache timeout
write_only - don't try fetching from cache, still write result there
NOTE: you can actually disable caching by omitting the corresponding ops;
.cache(ops=[]) disables caching for this queryset.
"""
self._require_cacheprofile()
if ops is None or ops == 'all':
ops = ALL_OPS
if isinstance(ops, str):
ops = [ops]
self._cacheconf['ops'] = set(ops)
if timeout is not None:
self._cacheconf['timeout'] = timeout
if write_only is not None:
self._cacheconf['write_only'] = write_only
return self
def nocache(self):
"""
Convenience method, turns off caching for this queryset
"""
# cache profile not present means caching is not enabled for this model
if self._cacheprofile is None:
return self
else:
return self.cache(ops=[])
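# Illustrative queryset usage (a sketch; the `Article` model is an assumption):
#
#   Article.objects.cache(ops=['get', 'fetch'], timeout=60).filter(pk__gt=10)
#   Article.objects.nocache().count()  # bypass caching for a single call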
def cloning(self, cloning=1000):
self._cloning = cloning
return self
def inplace(self):
return self.cloning(0)
if django.VERSION >= (1, 9):
def _clone(self, **kwargs):
if self._cloning:
return self.clone(**kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
else:
def _clone(self, klass=None, setup=False, **kwargs):
if self._cloning:
return self.clone(klass, setup, **kwargs)
elif klass is not None:
# HACK: monkey patch self.query.clone for single call
# to return itself instead of cloning
original_query_clone = self.query.clone
def query_clone():
self.query.clone = original_query_clone
return self.query
self.query.clone = query_clone
return self.clone(klass, setup, **kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, klass=None, setup=False, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, klass, setup, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
def iterator(self):
# TODO: do not cache empty queries in Django 1.6
superiter = self._no_monkey.iterator
cache_this = self._cacheprofile and 'fetch' in self._cacheconf['ops']
if cache_this:
cache_key = self._cache_key()
if not self._cacheconf['write_only'] and not self._for_write:
# Try to get data from cache
cache_data = redis_client.get(cache_key)
if cache_data is not None:
results = pickle.loads(cache_data)
for obj in results:
yield obj
return  # PEP 479: don't raise StopIteration inside a generator
# Cache miss - fall back to the overridden implementation
results = []
for obj in superiter(self):
if cache_this:
results.append(obj)
yield obj
if cache_this:
self._cache_results(cache_key, results)
return
def count(self):
if self._cacheprofile and 'count' in self._cacheconf['ops']:
# Optimization borrowed from the overridden method:
# if queryset cache is already filled just return its len
# NOTE: there is no self._iter in Django 1.6+, so we use getattr() for compatibility
if self._result_cache is not None and not getattr(self, '_iter', None):
return len(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.count(self))()
else:
return self._no_monkey.count(self)
def get(self, *args, **kwargs):
# .get() uses the same .iterator() method to fetch data,
# so here we add 'fetch' to ops
if self._cacheprofile and 'get' in self._cacheconf['ops']:
# NOTE: local_get=True enables caching of simple gets in local memory,
# which is very fast, but not invalidated.
# Don't bother with Q-objects, select_related and chained filters;
# simple gets are what we are really after here.
if self._cacheprofile['local_get'] \
and not args \
and not self.query.select_related \
and not self.query.where.children:
# NOTE: We use a simpler way to generate a cache key to cut costs.
# Some day it could produce the same key for different requests.
key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
try:
return _local_get_cache[key]
except KeyError:
_local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
return _local_get_cache[key]
except TypeError:
# If some arg is unhashable we can't use it in a dict key,
# so we just skip the local cache in that case
pass
if 'fetch' in self._cacheconf['ops']:
qs = self
else:
qs = self._clone().cache()
else:
qs = self
return qs._no_monkey.get(qs, *args, **kwargs)
if django.VERSION >= (1, 6):
def exists(self):
if self._cacheprofile and 'exists' in self._cacheconf['ops']:
if self._result_cache is not None:
return bool(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.exists(self))()
else:
return self._no_monkey.exists(self)
if django.VERSION >= (1, 5):
def bulk_create(self, objs, batch_size=None):
objs = self._no_monkey.bulk_create(self, objs, batch_size=batch_size)
if family_has_profile(self.model):
for obj in objs:
invalidate_obj(obj)
return objs
elif django.VERSION >= (1, 4):
def bulk_create(self, objs):
objs = self._no_monkey.bulk_create(self, objs)
if family_has_profile(self.model):
for obj in objs:
invalidate_obj(obj)
return objs
def connect_first(signal, receiver, sender):
old_receivers = signal.receivers
signal.receivers = []
signal.connect(receiver, sender=sender)
signal.receivers += old_receivers
# We need to stash the old object before Model.save() to invalidate based on its previous field values
_old_objs = {}
class ManagerMixin(object):
@once_per('cls')
def _install_cacheops(self, cls):
cls._cacheprofile = model_profile(cls)
if family_has_profile(cls):
# Set up signals
connect_first(pre_save, self._pre_save, sender=cls)
connect_first(post_save, self._post_save, sender=cls)
connect_first(post_delete, self._post_delete, sender=cls)
# Install auto-created models as their module attributes to make them picklable
module = sys.modules[cls.__module__]
if not hasattr(module, cls.__name__):
setattr(module, cls.__name__, cls)
def contribute_to_class(self, cls, name):
self._no_monkey.contribute_to_class(self, cls, name)
# Django 1.7+ migrations create lots of fake models, just skip them
# NOTE: we do it here rather than inside _install_cacheops()
# because we don't want @once_per() to hold refs to all of them.
if cls.__module__ != '__fake__':
self._install_cacheops(cls)
def _pre_save(self, sender, instance, **kwargs):
if instance.pk is not None:
try:
_old_objs[get_thread_id(), sender, instance.pk] = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass
def _post_save(self, sender, instance, **kwargs):
# Invoke invalidations for both old and new versions of saved object
old = _old_objs.pop((get_thread_id(), sender, instance.pk), None)
if old:
invalidate_obj(old)
invalidate_obj(instance)
# NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
# but its base having one. Or vice versa.
# We still need to invalidate in this case, but cache on save better be skipped.
if not instance._cacheprofile:
return
# When cache_on_save is enabled we write the saved object to cache.
# Later it can be retrieved with .get(<cache_on_save_field>=<value>)
# <cache_on_save_field> is pk unless specified.
# This sweet trick saves a db request and helps with slave lag.
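# For example, a profile like {'ops': {'get'}, 'timeout': 300,
# 'cache_on_save': 'email'} (these keys are the ones read elsewhere in this
# module) would cache the saved instance under the key used by
# .get(email=<value>).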
cache_on_save = instance._cacheprofile.get('cache_on_save')
if cache_on_save:
# HACK: We get this object "from field" so it can contain
# some undesirable attributes or other objects attached.
# RelatedField accessors do that, for example.
#
# So we strip down any _*_cache attrs before saving
# and later reassign them
unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
for k in unwanted_dict:
del instance.__dict__[k]
key = 'pk' if cache_on_save is True else cache_on_save
# Django doesn't allow filters like related_id = 1337,
# so we just hackily strip the _id suffix from the key
# TODO: make it right, _meta.get_field() should help
filter_key = key[:-3] if key.endswith('_id') else key
cond = {filter_key: getattr(instance, key)}
qs = sender.objects.inplace().filter(**cond).order_by()
if MAX_GET_RESULTS:
qs = qs[:MAX_GET_RESULTS + 1]
qs._cache_results(qs._cache_key(), [instance])
# Reverting stripped attributes
instance.__dict__.update(unwanted_dict)
def _post_delete(self, sender, instance, **kwargs):
"""
Invalidation upon object deletion.
"""
# NOTE: this will behave incorrectly if someone changed object fields
# before deletion (but why would anyone do that?)
invalidate_obj(instance)
# Django 1.5 and earlier compatibility
if not hasattr(Manager, 'get_queryset'):
def get_queryset(self):
return self.get_query_set()
def inplace(self):
return self.get_queryset().inplace()
def cache(self, *args, **kwargs):
return self.get_queryset().cache(*args, **kwargs)
def nocache(self):
return self.get_queryset().nocache()
def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
**kwargs):
"""
Invoke invalidation on m2m changes.
"""
# Skip this machinery for explicit through tables,
# since post_save and post_delete events are triggered for them
if not sender._meta.auto_created:
return
if action not in ('pre_clear', 'post_add', 'pre_remove'):
return
m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
if m2m.rel.through == sender)
# TODO: optimize several invalidate_objs/dicts at once
if action == 'pre_clear':
# TODO: always use column names here once Django 1.3 is dropped
instance_field = m2m.m2m_reverse_field_name() if reverse else m2m.m2m_field_name()
objects = sender.objects.filter(**{instance_field: instance.pk})
for obj in objects:
invalidate_obj(obj)
elif action in ('post_add', 'pre_remove'):
instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
if reverse:
instance_column, model_column = model_column, instance_column
# NOTE: we don't need to query through objects here,
# because we already know all their meaningful attributes.
for pk in pk_set:
invalidate_dict(sender, {
instance_column: instance.pk,
model_column: pk
})
@once
def install_cacheops():
"""
Installs cacheops by numerous monkey patches
"""
monkey_mix(Manager, ManagerMixin)
monkey_mix(QuerySet, QuerySetMixin)
QuerySet._cacheprofile = QuerySetMixin._cacheprofile
QuerySet._cloning = QuerySetMixin._cloning
# DateQuerySet existed in Django 1.7 and earlier
# Values*QuerySet existed in Django 1.8 and earlier
from django.db.models import query
for cls_name in ('ValuesQuerySet', 'ValuesListQuerySet', 'DateQuerySet'):
if hasattr(query, cls_name):
cls = getattr(query, cls_name)
monkey_mix(cls, QuerySetMixin, ['iterator'])
try:
# Use app registry in Django 1.7
from django.apps import apps
admin_used = apps.is_installed('django.contrib.admin')
get_models = apps.get_models
except ImportError:
# Introspect INSTALLED_APPS in older djangos
from django.conf import settings
admin_used = 'django.contrib.admin' in settings.INSTALLED_APPS
from django.db.models import get_models
# Install profile and signal handlers for any earlier created models
for model in get_models(include_auto_created=True):
model._default_manager._install_cacheops(model)
# Turn off caching in admin
if admin_used:
from django.contrib.admin.options import ModelAdmin
# Renamed queryset to get_queryset in Django 1.6
method_name = 'get_queryset' if hasattr(ModelAdmin, 'get_queryset') else 'queryset'
@monkey(ModelAdmin, name=method_name)
def get_queryset(self, request):
return get_queryset.original(self, request).nocache()
# Bind m2m changed handler
m2m_changed.connect(invalidate_m2m)
# Make buffers/memoryviews pickleable to serialize binary field data
if six.PY2:
import copy_reg
copy_reg.pickle(buffer, lambda b: (buffer, (bytes(b),)))
if six.PY3:
import copyreg
copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
| bsd-3-clause | -3,087,637,271,154,831,400 | 37.543438 | 100 | 0.590255 | false |
timlau/dnf-daemon | daemon/dnfdaemon-session.py | 1 | 13512 | #!/usr/bin/python3
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# (C) 2013-2014 Tim Lauridsen <[email protected]>
#
# dnf session bus dBus service (Readonly)
#
from dnfdaemon.server import Logger
import argparse
import dbus
import dbus.service
import dbus.mainloop.glib
import dnfdaemon.server
import logging
DAEMON_ORG = 'org.baseurl.DnfSession'
DAEMON_INTERFACE = DAEMON_ORG
logger = logging.getLogger('dnfdaemon.session')
#--------------------------------------------------------------- DBus Exception
class AccessDeniedError(dbus.DBusException):
_dbus_error_name = DAEMON_ORG + '.AccessDeniedError'
class LockedError(dbus.DBusException):
_dbus_error_name = DAEMON_ORG + '.LockedError'
class NotImplementedError(dbus.DBusException):
_dbus_error_name = DAEMON_ORG + '.NotImplementedError'
#------------------------------------------------------------------- Main class
class DnfDaemon(dnfdaemon.server.DnfDaemonBase):
def __init__(self):
dnfdaemon.server.DnfDaemonBase.__init__(self)
bus_name = dbus.service.BusName(DAEMON_ORG, bus=dbus.SessionBus())
dbus.service.Object.__init__(self, bus_name, '/')
#=========================================================================
# DBus Methods
#=========================================================================
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='',
out_signature='i')
def GetVersion(self):
'''
Get the daemon version
'''
return dnfdaemon.server.API_VERSION
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='',
out_signature='b',
sender_keyword='sender')
def Exit(self, sender=None):
'''
Exit the daemon
:param sender:
'''
if self._can_quit:
self.mainloop_quit()
return True
else:
return False
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='',
out_signature='b',
sender_keyword='sender')
def Lock(self, sender=None):
'''
Acquire the dnf lock
:param sender:
'''
if not self._lock:
self._lock = sender
logger.info('LOCK: Locked by : %s' % sender)
return True
return False
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='b',
out_signature='b',
sender_keyword='sender')
def SetWatchdogState(self, state, sender=None):
'''
Set the Watchdog state
:param state: True = Watchdog active, False = Watchdog disabled
:type state: boolean (b)
'''
self._watchdog_disabled = not state
return state
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='',
out_signature='b',
sender_keyword='sender')
def ExpireCache(self, sender=None):
'''
Expire the dnf metadata cache so fresh metadata is fetched on the next operation
:param sender:
'''
self.working_start(sender)
rc = self.expire_cache()
return self.working_ended(rc)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='s',
out_signature='as',
sender_keyword='sender')
def GetRepositories(self, filter, sender=None):
'''
Get a list of repo ids
:param filter: filter to limit the listed repositories
:param sender:
'''
self.working_start(sender)
repos = self.get_repositories(filter)
return self.working_ended(repos)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='as',
out_signature='',
sender_keyword='sender')
def SetEnabledRepos(self, repo_ids, sender=None):
'''
Enable a list of repositories and disable all other repos
:param repo_ids: list of repo ids to enable
:param sender:
'''
self.working_start(sender)
self.set_enabled_repos(repo_ids)
return self.working_ended()
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='s',
out_signature='s',
sender_keyword='sender')
def GetConfig(self, setting, sender=None):
'''
Get the value of a dnf config setting;
it will be returned as a JSON string
:param setting: name of setting (debuglevel etc..)
:param sender:
'''
self.working_start(sender)
value = self.get_config(setting)
return self.working_ended(value)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='s',
out_signature='s',
sender_keyword='sender')
def GetRepo(self, repo_id, sender=None):
'''
Get information about a given repo_id;
the repo settings will be returned as a dictionary in JSON format
:param repo_id:
:param sender:
'''
self.working_start(sender)
value = self.get_repo(repo_id)
return self.working_ended(value)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='sas',
out_signature='s',
sender_keyword='sender')
def GetPackages(self, pkg_filter, fields, sender=None):
'''
Get a list of package ids, based on a package filter
:param pkg_filter: package filter string ('installed', 'updates', etc.)
:param sender:
'''
self.working_start(sender)
value = self.get_packages(pkg_filter, fields)
return self.working_ended(value)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='sasb',
out_signature='s',
sender_keyword='sender')
def GetPackagesByName(self, name, attrs, newest_only, sender=None):
'''
Get a list of packages from a name pattern
:param name: name pattern
:param newest_only: True = get newest packages only
:param attrs: list of package attributes to get
:param sender:
'''
self.working_start(sender)
values = self.get_packages_by_name_with_attr(name, attrs, newest_only)
return self.working_ended(values)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='ss',
out_signature='s',
sender_keyword='sender')
def GetAttribute(self, id, attr, sender=None):
'''
Get an attribute from a dnf package id;
it will return a python repr string of the attribute
:param id: yum package id
:param attr: name of attribute (summary, size,
description, changelog etc..)
:param sender:
'''
self.working_start(sender)
value = self.get_attribute(id, attr)
return self.working_ended(value)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='',
out_signature='b',
sender_keyword='sender')
def Unlock(self, sender=None):
''' release the lock'''
if self.check_lock(sender):
logger.info('UNLOCK: Lock Release by %s' % self._lock)
self._lock = None
self._reset_base()
return True
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='asasasbbb',
out_signature='s',
sender_keyword='sender')
def Search(self, fields, keys, attrs, match_all, newest_only,
tags, sender=None):
'''
Search for packages where the given fields contain the given key words
:param fields: list of fields to search in
:param keys: list of keywords to search for
:param attrs: list of extra attributes to get
:param match_all: match all flag, if True return only packages
matching all keys
:param newest_only: return only the newest version of a package
:param tags: search pkgtags
'''
self.working_start(sender)
result = self.search_with_attr(
fields, keys, attrs, match_all, newest_only, tags)
return self.working_ended(result)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='',
out_signature='s',
sender_keyword='sender')
def GetGroups(self, sender=None):
'''
Return a category/group tree
'''
self.working_start(sender)
value = self.get_groups()
return self.working_ended(value)
@Logger
@dbus.service.method(DAEMON_INTERFACE,
in_signature='ssas',
out_signature='s',
sender_keyword='sender')
def GetGroupPackages(self, grp_id, grp_flt, fields, sender=None):
'''
Get packages in a group by grp_id and grp_flt
:param grp_id: The Group id
:param grp_flt: Group Filter (all or default)
:param fields: list of package attributes to include in list
:param sender:
'''
self.working_start(sender)
value = self.get_group_pkgs(grp_id, grp_flt, fields)
return self.working_ended(value)
#
# Template for new method
#
# @dbus.service.method(DAEMON_INTERFACE,
# in_signature='',
# out_signature='',
# sender_keyword='sender')
# def NewMethod(self, sender=None ):
# '''
#
# '''
# self.working_start(sender)
# value = True
# return self.working_ended(value)
#
#=========================================================================
# DBus signals
#=========================================================================
# Parallel Download Progress signals
@dbus.service.signal(DAEMON_INTERFACE)
def ErrorMessage(self, error_msg):
''' Send an error message '''
pass
@dbus.service.signal(DAEMON_INTERFACE)
def DownloadStart(self, num_files, num_bytes):
''' Starting a new parallel download batch '''
pass
@dbus.service.signal(DAEMON_INTERFACE)
def DownloadProgress(self, name, frac, total_frac, total_files):
''' Progress for a single instance in the batch '''
pass
@dbus.service.signal(DAEMON_INTERFACE)
def DownloadEnd(self, name, status, msg):
''' Download of a single instance ended '''
pass
@dbus.service.signal(DAEMON_INTERFACE)
def RepoMetaDataProgress(self, name, frac):
''' Repository Metadata Download progress '''
#=========================================================================
# Helper methods
#=========================================================================
def working_start(self, sender):
self.check_lock(sender)
self._is_working = True
self._watchdog_count = 0
def working_ended(self, value=None):
self._is_working = False
return value
def check_lock(self, sender):
'''
Check that the current sender owns the dnf lock
:param sender:
'''
if self._lock == sender:
return True
else:
raise LockedError('dnf is locked by another application')
def main():
parser = argparse.ArgumentParser(description='Dnf D-Bus Session Daemon')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('-d', '--debug', action='store_true')
parser.add_argument('--notimeout', action='store_true')
args = parser.parse_args()
if args.verbose:
if args.debug:
dnfdaemon.server.doTextLoggerSetup(logroot='dnfdaemon',
loglvl=logging.DEBUG)
else:
dnfdaemon.server.doTextLoggerSetup(logroot='dnfdaemon')
# setup the DBus mainloop
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
yd = DnfDaemon()
if not args.notimeout:
yd._setup_watchdog()
yd.mainloop_run()
if __name__ == '__main__':
main()
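# Illustrative client-side usage (a sketch; assumes the daemon is already
# running on the session bus):
#
#   import dbus
#   bus = dbus.SessionBus()
#   proxy = bus.get_object('org.baseurl.DnfSession', '/')
#   iface = dbus.Interface(proxy, 'org.baseurl.DnfSession')
#   if iface.Lock():
#       print(iface.GetVersion())
#       iface.Unlock()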
| gpl-2.0 | 1,706,098,923,312,927,200 | 32.864662 | 79 | 0.544627 | false |
rodynnz/python-tfstate | unit_tests/test_tfstate/test_provider/test_aws/test_aws_key_pair.py | 1 | 2130 | # -*- coding: utf-8 -*-
# Python stdlib
import unittest
# py.test
import pytest
# Python tfstate
from tfstate.provider.aws import AwsResource, AwsKeyPairResource
from tfstate.exceptions import InvalidResource
# Unit tests
from unit_tests.base import BaseResourceUnitTest
@pytest.mark.provider_aws
class AwsKeyPairResourceUnitTest(BaseResourceUnitTest):
def test_object_constructor(self):
self.load_example_json('aws/aws_key_pair/aws_key_pair_example.json')
resource_name, resource_data = self.example_data.popitem()
key_pair_resource = AwsKeyPairResource(resource_name, resource_data)
self.assertIsInstance(
key_pair_resource, AwsResource, "AwsKeyPairResource object does not inherit from AwsResource")
self.assertEqual(key_pair_resource.resource_type, "aws_key_pair", "Resource type is not aws_key_pair")
# Attribute checks
native_primary = key_pair_resource.primary_data
native_attributes = native_primary['attributes']
native_meta = native_primary['meta']
self.assertEqual(key_pair_resource.id, native_primary['id'], "Resource ID does not match")
self.assertEqual(
key_pair_resource.key_name, native_attributes['key_name'], "Resource key_name does not match")
self.assertEqual(
key_pair_resource.public_key, native_attributes['public_key'], "Resource public_key does not match")
self.assertEqual(
key_pair_resource.metadata, native_meta, "Resource metadata does not match")
def test_object_constructor_invalid_type(self):
self.load_example_json(
'aws/aws_key_pair/aws_key_pair_example_invalid_type.json')
resource_name, resource_data = self.example_data.popitem()
with self.assertRaises(InvalidResource):
AwsKeyPairResource(resource_name, resource_data)
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(AwsKeyPairResourceUnitTest))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| lgpl-3.0 | 5,549,204,734,299,582,000 | 39.188679 | 112 | 0.705634 | false |
westpark/wallball | docs/steps/code/s2d.py | 1 | 1630 | WIDTH = 640
HEIGHT = 480
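# NOTE: this is a Pygame Zero script: WIDTH/HEIGHT, ZRect, screen and the
# draw()/update()/on_mouse_move() hooks are supplied by the pgzero runtime
# (typically launched with `pgzrun s2d.py`).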
class Ball(ZRect): pass
#
# The ball is a red square halfway across the game screen
#
ball = Ball(0, 0, 30, 30)
ball.center = WIDTH / 2, HEIGHT / 2
ball.colour = "red"
#
# The ball moves one step right and one step down each tick
#
ball.direction = 1, 1
#
# The ball moves at a speed of 3 steps each tick
#
ball.speed = 3
class Bat(ZRect): pass
#
# The bat is a green oblong which starts just along the bottom
# of the screen and halfway across.
#
BAT_W = 150
BAT_H = 15
bat = Bat(WIDTH / 2, HEIGHT - BAT_H, BAT_W, BAT_H)
bat.colour = "green"
def draw():
#
# Clear the screen and place the ball at its current position
#
screen.clear()
screen.draw.filled_rect(ball, ball.colour)
screen.draw.filled_rect(bat, bat.colour)
def on_mouse_move(pos):
#
# Make the bat follow the horizontal movement of the mouse.
#
x, y = pos
bat.centerx = x
def update():
#
# Move the ball along its current direction at its current speed
#
dx, dy = ball.direction
ball.move_ip(ball.speed * dx, ball.speed * dy)
#
# Bounce the ball off the bat
#
if ball.colliderect(bat):
ball.direction = dx, -dy
#
# Bounce the ball off the left or right walls
#
if ball.right >= WIDTH or ball.left <= 0:
ball.direction = -dx, dy
#
# If the ball hits the bottom of the screen, you lose
#
if ball.bottom >= HEIGHT:
exit()
#
# Bounce the ball off the top wall
#
if ball.top <= 0:
ball.direction = dx, -dy | mit | 6,844,163,819,388,710,000 | 20.054054 | 68 | 0.590798 | false |
glottobank/pycldf | src/pycldf/cli_util.py | 1 | 2128 | from clldutils.clilib import PathType
from pycldf import Dataset, Database
#
# Copied from distutils.util - because we don't want to deal with deprecation warnings.
#
def strtobool(val): # pragma: no cover
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
class FlagOrPathType(PathType):
def __call__(self, string):
try:
return bool(strtobool(string))
except ValueError:
return super().__call__(string)
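# Illustrative usage (a sketch; the option name is hypothetical):
#
#   parser.add_argument('--download', type=FlagOrPathType(type='dir'), default=False)
#   # "--download yes"     -> True
#   # "--download ./media" -> a validated path object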
def add_dataset(parser):
parser.add_argument(
'dataset',
metavar='DATASET',
help="Dataset specification (i.e. path to a CLDF metadata file or to the data file)",
type=PathType(type='file'),
)
def get_dataset(args):
if args.dataset.suffix == '.json':
return Dataset.from_metadata(args.dataset)
return Dataset.from_data(args.dataset)
def add_database(parser, must_exist=True):
add_dataset(parser)
parser.add_argument(
'db',
metavar='SQLITE_DB_PATH',
help='Path to the SQLite db file',
type=PathType(type='file', must_exist=must_exist),
)
parser.add_argument('--infer-primary-keys', action='store_true', default=False)
def get_database(args):
return Database(get_dataset(args), fname=args.db, infer_primary_keys=args.infer_primary_keys)
def add_catalog_spec(parser, name):
parser.add_argument(
'--' + name,
metavar=name.upper(),
type=PathType(type='dir'),
help='Path to repository clone of {0} data'.format(name.capitalize()))
parser.add_argument(
'--{0}-version'.format(name),
help='Version of {0} data to checkout'.format(name.capitalize()),
default=None)
| apache-2.0 | 8,012,565,799,063,346,000 | 28.555556 | 97 | 0.610902 | false |
azumimuo/family-xbmc-addon | plugin.video.salts/scrapers/yifystreaming_scraper.py | 1 | 4784 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from salts_lib import dom_parser
from salts_lib import kodi
from salts_lib import log_utils
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import QUALITIES
import scraper
BASE_URL = 'http://yss.rocks'
GK_URL = '/plugins/gkpluginsphp.php'
CATEGORIES = {VIDEO_TYPES.MOVIE: 'category-movies', VIDEO_TYPES.EPISODE: 'category-tv-series'}
LOCAL_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'
class YifyStreaming_Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'yify-streaming'
def resolve_link(self, link):
return link
def format_source_label(self, item):
return '[%s] %s' % (item['quality'], item['host'])
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if source_url and source_url != FORCE_NO_MATCH:
url = urlparse.urljoin(self.base_url, source_url)
html = self._http_get(url, cache_limit=.5)
match = re.search('<iframe[^>]+src="([^"]+watch=([^"]+))', html)
if match:
iframe_url, link_id = match.groups()
data = {'link': link_id}
headers = {'Referer': iframe_url}
headers['User-Agent'] = LOCAL_USER_AGENT
gk_url = urlparse.urljoin(self.base_url, GK_URL)
html = self._http_get(gk_url, data=data, headers=headers, cache_limit=.5)
js_data = scraper_utils.parse_json(html, gk_url)
if 'link' in js_data:
if isinstance(js_data['link'], list):
sources = dict((link['link'], scraper_utils.height_get_quality(link['label'])) for link in js_data['link'])
direct = True
else:
sources = {js_data['link']: QUALITIES.HIGH}
direct = False
for source in sources:
source = source.replace('\\/', '/')
if direct:
host = self._get_direct_hostname(source)
else:
host = urlparse.urlparse(source).hostname
hoster = {'multi-part': False, 'url': source, 'class': self, 'quality': sources[source], 'host': host, 'rating': None, 'views': None, 'direct': direct}
hosters.append(hoster)
return hosters
def get_url(self, video):
return self._default_get_url(video)
def search(self, video_type, title, year):
search_url = urlparse.urljoin(self.base_url, '/?s=')
search_url += urllib.quote_plus(title)
html = self._http_get(search_url, cache_limit=.25)
elements = dom_parser.parse_dom(html, 'li', {'class': '[^"]*post-\d+[^"]*'})
results = []
for element in elements:
match = re.search('href="([^"]+)[^>]+>\s*([^<]+)', element, re.DOTALL)
if match:
url, match_title_year = match.groups()
match = re.search('(.*?)(?:\s+\(?(\d{4})\)?)', match_title_year)
if match:
match_title, match_year = match.groups()
else:
match_title = match_title_year
match_year = ''
if not year or not match_year or year == match_year:
result = {'title': match_title, 'year': match_year, 'url': scraper_utils.pathify_url(url)}
results.append(result)
return results
| gpl-2.0 | 7,210,410,083,957,632,000 | 40.241379 | 175 | 0.567099 | false |
ua-snap/downscale | snap_scripts/epscor_sc/compare_downscaling_versions_plots_cmip5_epscor_sc_pr_compare_withNOFIX.py | 1 | 9093 | # # # # # compare tasmin, tas, tasmax in a timeseries of GeoTiff files # # # #
def transform_from_latlon( lat, lon ):
''' simple way to make an affine transform from lats and lons coords '''
from affine import Affine
lat = np.asarray( lat )
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale
def rasterize( shapes, coords, latitude='latitude', longitude='longitude', fill=None, **kwargs ):
'''
Rasterize a list of (geometry, fill_value) tuples onto the given
xarray coordinates. This only works for 1d latitude and longitude
arrays.
'''
from rasterio import features
if fill is None:
fill = np.nan
transform = transform_from_latlon( coords[ latitude ], coords[ longitude ] )
out_shape = ( len( coords[ latitude ] ), len( coords[ longitude ] ) )
raster = features.rasterize(shapes, out_shape=out_shape,
fill=fill, transform=transform,
dtype=float, **kwargs)
spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]}
return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude))
def sort_files( files, split_on='_', elem_month=-2, elem_year=-1 ):
'''
sort a list of files properly using the month and year parsed
from the filename. This is useful with SNAP data since the standard
is to name files like '<prefix>_MM_YYYY.tif'. If sorted using base
Pythons sort/sorted functions, things will be sorted by the first char
of the month, which makes thing go 1, 11, ... which sucks for timeseries
this sorts it properly following SNAP standards as the default settings.
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_month = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-2. For SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sorted `list` by month and year ascending.
'''
import pandas as pd
months = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_month]) for fn in files ]
years = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( {'fn':files, 'month':months, 'year':years} )
df_sorted = df.sort_values( ['year', 'month' ] )
return df_sorted.fn.tolist()
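# Illustrative example (hypothetical filenames following the
# '<prefix>_MM_YYYY.tif' convention described above):
#
#   fns = ['tas_mean_C_11_2016.tif', 'tas_mean_C_1_2016.tif']
#   sort_files(fns)  # -> ['tas_mean_C_1_2016.tif', 'tas_mean_C_11_2016.tif']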
def only_years( files, begin=1901, end=2100, split_on='_', elem_year=-1 ):
'''
return new list of filenames where they are truncated to begin:end
ARGUMENTS:
----------
files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob.
begin = [int] four digit integer year of the begin time default:1901
end = [int] four digit integer year of the end time default:2100
split_on = [str] `str` character to split the filename on. default:'_', SNAP standard.
elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax.
default:-1. For SNAP standard.
RETURNS:
--------
sliced `list` to begin and end year.
'''
import pandas as pd
years = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_year]) for fn in files ]
df = pd.DataFrame( { 'fn':files, 'year':years } )
df_slice = df[ (df.year >= begin ) & (df.year <= end ) ]
return df_slice.fn.tolist()
def masked_mean( fn, bounds=None ):
''' get the mean of the full domain (the data are already clipped);
mostly used for processing lots of files in parallel. '''
import numpy as np
import rasterio
with rasterio.open( fn ) as rst:
if bounds:
window = rst.window( *bounds )
else:
window = rst.window( *rst.bounds )
mask = (rst.read_masks( 1 ) == 0)
arr = np.ma.masked_array( rst.read( 1, window=window ), mask=mask )
return np.mean( arr )
if __name__ == '__main__':
import os, glob
import geopandas as gpd
import numpy as np
import xarray as xr
import matplotlib
matplotlib.use( 'agg' )
from matplotlib import pyplot as plt
from pathos.mp_map import mp_map
import pandas as pd
import geopandas as gpd
# args / set working dir
base_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data'
os.chdir( base_dir )
# scenarios = ['rcp60', 'rcp85']
scenarios = ['historical']
shp_fn = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/SCTC_studyarea/Kenai_StudyArea.shp'
shp = gpd.read_file( shp_fn )
bounds = shp.bounds
# models = ['5ModelAvg','CRU_TS323','GFDL-CM3','GISS-E2-R','IPSL-CM5A-LR','MRI-CGCM3','NCAR-CCSM4']
# models = ['GFDL-CM3','GISS-E2-R','IPSL-CM5A-LR','MRI-CGCM3','NCAR-CCSM4']
models = ['ts323']
variables_list = [['pr']]# ['tasmax', 'tas', 'tasmin']]#,
# models = ['CRU_TS323']
# begin_end_groups = [[2016,2016],[2010,2020],[2095, 2100]]
begin_end_groups = [[1916, 1916],[1950, 1960],[1995, 2000]]
for scenario in scenarios:
for variables in variables_list:
for m in models:
for begin, end in begin_end_groups: # not fully wired-up yet
if m == 'ts323':
old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/historical/CRU/CRU_TS32'
# begin = 1950
# end = 1965
else:
if scenario == 'historical':
old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/historical/AR5_CMIP5_models'
# begin = 1950
# end = 1965
else:
old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/projected/AR5_CMIP5_models'
# begin = 2095
# end = 2100
figsize = (16,9)
out = {}
for v in variables:
path = os.path.join( base_dir,'downscaled', m, scenario, v )
print( path )
files = glob.glob( os.path.join( path, '*.tif' ) )
files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) )
out[ v ] = mp_map( masked_mean, files, nproc=4 )
if v == 'tas' or v == 'pr':
if m == 'ts323':
path = os.path.join( old_dir, v )
print( path )
else:
path = os.path.join( old_dir, scenario, m, v )
files = glob.glob( os.path.join( path, '*.tif' ) )
files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) )
out[ v+'_old' ] = mp_map( masked_mean, files, nproc=4 )
# nofix
path = os.path.join( base_dir,'downscaled_pr_nofix', m, scenario, v )
print( path )
files = glob.glob( os.path.join( path, '*.tif' ) )
files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) )
out[ v+'_nofix' ] = mp_map( masked_mean, files, nproc=4 )
plot_df = pd.DataFrame( out )
plot_df.index = pd.date_range( start=str(begin), end=str(end+1), freq='M' )
# sort the columns for output plotting cleanliness:
if 'tas' in variables:
col_list = ['tasmax', 'tas_old', 'tas', 'tasmin']
elif 'pr' in variables:
col_list = ['pr', 'pr_old', 'pr_nofix']
plot_df = plot_df[ col_list ] # get em in the order for plotting
if v == 'pr':
plot_df = plot_df.round()[['pr','pr_old']]
# now plot the dataframe
if begin == end:
title = 'EPSCoR SC AOI Temp Metrics {} {} {}'.format( m, scenario, begin )
else:
title = 'EPSCoR SC AOI Temp Metrics {} {} {} - {}'.format( m, scenario, begin, end )
if 'tas' in variables:
colors = ['red', 'black', 'blue', 'red' ]
else:
colors = [ 'blue', 'black', 'darkred' ]
ax = plot_df.plot( kind='line', title=title, figsize=figsize, color=colors )
output_dir = os.path.join( base_dir, 'compare_downscaling_versions_PR_no_fix' )
if not os.path.exists( output_dir ):
os.makedirs( output_dir )
# now plot the dataframe
out_metric_fn = 'temps'
if 'pr' in variables:
out_metric_fn = 'prec'
if begin == end:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}.png'.format( out_metric_fn, m, scenario, begin ) )
else:
output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}_{}.png'.format( out_metric_fn, m, scenario, begin, end ) )
plt.savefig( output_filename, dpi=400 )
plt.close()
# # # PRISM TEST VERSION DIFFERENCES # # # # # # #
# import rasterio
# import numpy as np
# import os, glob, itertools
# base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism/raw_prism'
# variables = [ 'tmax', 'tmin' ]
# for variable in variables:
# ak_olds = sorted( glob.glob( os.path.join( base_path, 'prism_raw_older', 'ak', variable, '*.asc' ) ) )
# ak_news = sorted( glob.glob( os.path.join( base_path, 'prism_raw_2016', 'ak', variable, '*.asc' ) ) )
# olds = np.array([ rasterio.open( i ).read( 1 ) for i in ak_olds if '_14' not in i ])
# news = np.array([ rasterio.open( i ).read( 1 ) *.10 for i in ak_news if '_14' not in i ])
# out = olds - news
# out[ (olds == -9999.0) | (news == -9999.0) ] = 0
# uniques = np.unique( out )
# uniques[ uniques > 0.01 ]
| mit | -2,754,196,695,637,991,000 | 37.858974 | 135 | 0.634114 | false |
cyli/volatility | volatility/plugins/linux/psaux.py | 1 | 1520 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import volatility.plugins.linux.pslist as linux_pslist
from volatility.renderers import TreeGrid
from volatility.renderers.basic import Address
class linux_psaux(linux_pslist.linux_pslist):
'''Gathers processes along with the full command line'''
def unified_output(self, data):
return TreeGrid([("Arguments", str),
("Pid", int),
("Uid", int),
("Gid", int)],
self.generator(data))
def generator(self, data):
for task in data:
yield (0, [str(task.get_commandline()), int(task.pid), int(task.uid), int(task.gid)])
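# Illustrative invocation (Volatility 2 command line; the image and profile
# names are placeholders):
#
#   python vol.py -f memory.lime --profile=LinuxDebian8x64 linux_psaux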
| gpl-2.0 | -7,106,883,151,586,503,000 | 34.348837 | 97 | 0.675658 | false |
Ape/sc2skills | main.py | 1 | 2472 | #!/usr/bin/env python3
import argparse
import collections
import enum
import trueskill
from ladder import Ladder
DRAW_PROBABILITY = 0.001
OPPONENT_SIGMA = 0.1
Result = enum.Enum("Result", "win loss")
Game = collections.namedtuple("Game", "result mmr label")
def load_games(games_file):
games = []
with open(games_file, "r") as f:
lines = f.readlines()
for line in lines:
data = line.strip().split("#")[0]
if data:
result, mmr, label = data.split()
games.append(Game(Result[result], int(mmr), label))
return games
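# Illustrative games file (one game per line: "<result> <mmr> <label>", with
# '#' starting a comment; the values are made up):
#
#   win 3500 main-account
#   loss 3450 main-account  # lost to a cannon rush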
def print_board(ladder, ratings):
if len(ratings) == 0:
print("No ratings.")
return
def sort_by_score(items):
return reversed(sorted(items, key=lambda x: x[1].mu))
def max_name_width(items):
return max(len(x[0]) for x in items)
items = ratings.items()
items = sort_by_score(items)
items = list(items)
name_width = max_name_width(items)
for name, rating in items:
league = ladder.get_league(rating.mu)
print("{name:{width}s} {mu:.0f} ± {sigma:.0f} ({league})"
.format(name=name, width=name_width,
mu=rating.mu, sigma=2*rating.sigma,
league=league))
def rate(ladder, rating, game):
opponent = trueskill.Rating(mu=game.mmr,
sigma=OPPONENT_SIGMA * ladder.sigma)
if game.result is Result.win:
return trueskill.rate_1vs1(rating, opponent)[0]
else:
return trueskill.rate_1vs1(opponent, rating)[1]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("region")
parser.add_argument("games_file")
args = parser.parse_args()
try:
ladder = Ladder(args.region)
except KeyError:
print("Error: Region '{}' not recognized".format(args.region))
return
trueskill.setup(mu=ladder.mu, sigma=ladder.sigma, beta=0.5 * ladder.sigma,
tau=0.01 * ladder.sigma, draw_probability=DRAW_PROBABILITY)
ratings = collections.defaultdict(lambda: trueskill.Rating())
try:
games = load_games(args.games_file)
except OSError as e:
print("Error: Cannot read the provided games file:")
print(" {}".format(e))
return
for game in games:
ratings[game.label] = rate(ladder, ratings[game.label], game)
print_board(ladder, ratings)
if __name__ == "__main__":
main()
| mit | -6,900,255,387,101,599,000 | 25.569892 | 79 | 0.604614 | false |
srkukarni/heron | integration_test/src/python/integration_test/topology/one_bolt_multi_tasks.py | 1 | 1322 | # copyright 2016 twitter. all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
from heron.api.src.python.stream import Grouping
from integration_test.src.python.integration_test.core import TestTopologyBuilder
from integration_test.src.python.integration_test.common.bolt import IdentityBolt
from integration_test.src.python.integration_test.common.spout import ABSpout
def one_bolt_multi_tasks_builder(topology_name, http_server_url):
builder = TestTopologyBuilder(topology_name, http_server_url)
ab_spout = builder.add_spout("ab-spout", ABSpout, 1)
builder.add_bolt("identity-bolt", IdentityBolt,
inputs={ab_spout: Grouping.SHUFFLE},
par=3,
optional_outputs=['word'])
return builder.create_topology()
| apache-2.0 | 2,974,093,918,747,431,000 | 41.645161 | 81 | 0.748109 | false |
Asurada2015/TFAPI_translation | math_ops_linear algebra/tf_eye.py | 1 | 1214 | import tensorflow as tf
"""tf.eye(num_rows, num_columns=None, batch_shape=None, dtype=tf.float32, name=None)
Purpose: returns an identity matrix (or a batch of them).
Inputs: num_rows: number of rows of the matrix; num_columns: number of columns, defaults to num_rows;
batch_shape: if provided, returns a batch of identity matrices with this leading shape."""
z1 = tf.eye(2, batch_shape=[2]) # a 3-D tensor containing two 2x2 identity matrices
z2 = tf.eye(2, batch_shape=[3]) # a 3-D tensor containing three 2x2 identity matrices
z3 = tf.eye(2, batch_shape=[2, 1])
z4 = tf.eye(2, batch_shape=[3, 2])
# the whole batch of identity matrices of the given shape should be viewed as one unit
sess = tf.Session()
print("This z1", sess.run(z1))
print("This z2", sess.run(z2))
print("This z3", sess.run(z3))
print("This z4", sess.run(z4))
sess.close()
"""
This z1
[
[[ 1. 0.]
[ 0. 1.]]
[[ 1. 0.]
[ 0. 1.]]
]
This z2
[
[[ 1. 0.]
[ 0. 1.]]
[[ 1. 0.]
[ 0. 1.]]
[[ 1. 0.]
[ 0. 1.]]
]
This z3
[
[[[ 1. 0.]
[ 0. 1.]]]
[[[ 1. 0.]
[ 0. 1.]]]
]
This z4
[
[
[[ 1. 0.]
[ 0. 1.]]
[[ 1. 0.]
[ 0. 1.]]
]
[
[[ 1. 0.]
[ 0. 1.]]
[[ 1. 0.]
[ 0. 1.]]
]
[
[[ 1. 0.]
[ 0. 1.]]
[[ 1. 0.]
[ 0. 1.]]
]
]""" | apache-2.0 | 7,731,127,931,833,210,000 | 12.381579 | 84 | 0.464567 | false |
WayStudios/fora | fora/__init__.py | 1 | 5883 | # fora
# Xu [[email protected]] Copyright 2015
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
from sqlalchemy import engine_from_config
from fora.core.dbsession import DBSession
from fora.core.model import Model
from fora.core.view import View
from fora.core.adminview import AdminView
from fora.views.install import InstallView
from fora.views.notfound import NotFoundView
from fora.views.forbidden import ForbiddenView
from fora.views.portal import PortalView
from fora.views.user import UserView
from fora.views.registration import RegistrationView
from fora.views.forum import ForumView
from fora.views.topic import TopicView
from fora.views.thread import ThreadView
from fora.views.article import ArticleView
from fora.views.adminportal import AdminPortalView
from fora.views.admindashboard import AdminDashboardView
from fora.views.adminsites import AdminSitesView
from fora.views.adminusers import AdminUsersView
from fora.views.adminforums import AdminForumsView
from fora.views.admintopics import AdminTopicsView
from fora.views.adminthreads import AdminThreadsView
from fora.views.adminarticles import AdminArticlesView
from fora.views.adminmoderators import AdminModeratorsView
from fora.views.adminconfigurations import AdminConfigurationsView
import uuid
def main(global_config, **settings):
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind = engine)
Model.metadata.bind = engine
View.path['static'] = settings['path.static']
View.path['templates'] = settings['path.templates']
AdminView.path['static'] = settings['path.static']
AdminView.path['templates'] = '%(path)s/admin' % {'path': settings['path.templates']}
config = Configurator(settings = settings, session_factory = SignedCookieSessionFactory(secret = settings['session.secret'], salt = 'fora.session'))
config.include('pyramid_chameleon')
config.add_static_view('static', settings['path.static'], cache_max_age = 3600)
config.add_forbidden_view(view = ForbiddenView)
config.add_notfound_view(view = NotFoundView)
config.add_route('install', '/install')
config.add_view(view = InstallView, route_name = 'install')
config.add_route('portal', '/')
config.add_view(view = PortalView, route_name = 'portal')
config.add_route('registration', '/user/register')
config.add_view(view = RegistrationView, route_name = 'registration')
config.add_route('user', '/user/{identity:.*}')
config.add_view(view = UserView, route_name = 'user')
config.add_route('forum', '/forum/{identity:.*}')
config.add_view(view = ForumView, route_name = 'forum')
config.add_route('topic', '/topic/{identity:.*}')
config.add_view(view = TopicView, route_name = 'topic')
config.add_route('thread', '/thread/{identity:.*}')
config.add_view(view = ThreadView, route_name = 'thread')
config.add_route('article', '/article/{identity:.*}')
config.add_view(view = ArticleView, route_name = 'article')
config.add_route('admin_portal', '/admin')
config.add_view(view = AdminPortalView, route_name = 'admin_portal')
config.add_route('admin_dashboard', '/admin/dashboard')
config.add_view(view = AdminDashboardView, route_name = 'admin_dashboard')
config.add_route('admin_topics', '/admin/topics')
config.add_view(view = AdminTopicsView, route_name = 'admin_topics')
config.add_route('admin_topics_activity', '/admin/topics/{activity}/{identity:.*}')
config.add_view(view = AdminTopicsView, route_name = 'admin_topics_activity')
config.add_route('admin_threads', '/admin/threads')
config.add_view(view = AdminThreadsView, route_name = 'admin_threads')
config.add_route('admin_threads_activity', '/admin/threads/{activity}/{identity:.*}')
config.add_view(view = AdminThreadsView, route_name = 'admin_threads_activity')
config.add_route('admin_sites', '/admin/sites')
config.add_view(view = AdminSitesView, route_name = 'admin_sites')
config.add_route('admin_sites_activity', '/admin/sites/{activity}/{identity:.*}')
config.add_view(view = AdminSitesView, route_name = 'admin_sites_activity')
config.add_route('admin_users', '/admin/users')
config.add_view(view = AdminUsersView, route_name = 'admin_users')
config.add_route('admin_users_activity', '/admin/users/{activity}/{identity:.*}')
config.add_view(view = AdminUsersView, route_name = 'admin_users_activity')
config.add_route('admin_articles', '/admin/articles')
config.add_view(view = AdminArticlesView, route_name = 'admin_articles')
config.add_route('admin_articles_activity', '/admin/articles/{activity}/{identity:.*}')
config.add_view(view = AdminArticlesView, route_name = 'admin_articles_activity')
config.add_route('admin_forums', '/admin/forums')
config.add_view(view = AdminForumsView, route_name = 'admin_forums')
config.add_route('admin_forums_activity', '/admin/forums/{activity}/{identity:.*}')
config.add_view(view = AdminForumsView, route_name = 'admin_forums_activity')
config.add_route('admin_moderators', '/admin/moderators')
config.add_view(view = AdminModeratorsView, route_name = 'admin_moderators')
config.add_route('admin_moderators_activity', '/admin/moderators/{activity}/{identity:.*}')
config.add_view(view = AdminModeratorsView, route_name = 'admin_moderators_activity')
config.add_route('admin_configurations', '/admin/configurations')
config.add_view(view = AdminConfigurationsView, route_name = 'admin_configurations')
config.add_route('admin_configurations_activity', '/admin/configurations/{activity}/{identity:.*}')
config.add_view(view = AdminConfigurationsView, route_name = 'admin_configurations_activity')
config.add_translation_dirs(settings['path.locales'])
config.scan()
return config.make_wsgi_app()
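# Illustrative PasteDeploy configuration (a sketch; only the settings keys
# actually read above are shown, and all values are placeholders):
#
#   [app:main]
#   use = egg:fora
#   sqlalchemy.url = sqlite:///%(here)s/fora.sqlite
#   session.secret = change-me
#   path.static = %(here)s/fora/static
#   path.templates = %(here)s/fora/templates
#   path.locales = %(here)s/fora/locales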
| bsd-3-clause | -8,504,085,155,422,548,000 | 54.5 | 152 | 0.734829 | false |
yaybu/touchdown | touchdown/tests/stubs/aws/rest_api.py | 1 | 1606 | # Copyright 2016 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .service import ServiceStubber
class RestApiStubber(ServiceStubber):
client_service = "apigateway"
def add_get_rest_apis_empty_response(self):
return self.add_response(
"get_rest_apis", service_response={}, expected_params={}
)
def add_get_rest_apis_one_response(self):
return self.add_response(
"get_rest_apis",
service_response={
"items": [
{"name": self.resource.name, "id": self.make_id(self.resource.name)}
]
},
expected_params={},
)
def add_create_rest_api(self):
return self.add_response(
"create_rest_api",
service_response={},
expected_params={"name": self.resource.name},
)
def add_delete_rest_api(self):
return self.add_response(
"delete_rest_api",
service_response={},
expected_params={"restApiId": self.make_id(self.resource.name)},
)
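# Illustrative use in a test (a sketch; the constructor arguments and the
# surrounding test fixture are assumptions):
#
#   stubber = RestApiStubber(service, resource)
#   stubber.add_get_rest_apis_empty_response()
#   stubber.add_create_rest_api()
#   # ... run the plan under test, then assert the stub queue is empty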
| apache-2.0 | -3,704,852,946,402,137,000 | 31.12 | 88 | 0.62142 | false |
gedaskir/qmeq | qmeq/approach/base/redfield.py | 1 | 6053 | """Module containing python functions, which generate first order Redfield kernel.
For docstrings see documentation of module neumann1."""
import numpy as np
import itertools
from ...wrappers.mytypes import doublenp
from ...wrappers.mytypes import complexnp
from ..aprclass import Approach
from .neumann1 import Approach1vN
# ---------------------------------------------------------------------------------------------------
# Redfield approach
# ---------------------------------------------------------------------------------------------------
class ApproachRedfield(Approach):
kerntype = 'pyRedfield'
def prepare_arrays(self):
Approach1vN.prepare_arrays(self)
def clean_arrays(self):
Approach1vN.clean_arrays(self)
def generate_fct(self):
Approach1vN.generate_fct(self)
def generate_coupling_terms(self, b, bp, bcharge):
Tba, phi1fct = self.leads.Tba, self.phi1fct
si, kh = self.si, self.kernel_handler
nleads, statesdm = si.nleads, si.statesdm
acharge = bcharge-1
ccharge = bcharge+1
# --------------------------------------------------
for a, ap in itertools.product(statesdm[acharge], statesdm[acharge]):
if kh.is_included(a, ap, acharge):
bpap = si.get_ind_dm1(bp, ap, acharge)
ba = si.get_ind_dm1(b, a, acharge)
fct_aap = 0
for l in range(nleads):
fct_aap += (+ Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, bpap, 0].conjugate()
- Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, ba, 0])
kh.set_matrix_element(fct_aap, b, bp, bcharge, a, ap, acharge)
# --------------------------------------------------
for bpp in statesdm[bcharge]:
if kh.is_included(bpp, bp, bcharge):
fct_bppbp = 0
for a in statesdm[acharge]:
bppa = si.get_ind_dm1(bpp, a, acharge)
for l in range(nleads):
fct_bppbp += +Tba[l, b, a]*Tba[l, a, bpp]*phi1fct[l, bppa, 1].conjugate()
for c in statesdm[ccharge]:
cbpp = si.get_ind_dm1(c, bpp, bcharge)
for l in range(nleads):
fct_bppbp += +Tba[l, b, c]*Tba[l, c, bpp]*phi1fct[l, cbpp, 0]
kh.set_matrix_element(fct_bppbp, b, bp, bcharge, bpp, bp, bcharge)
# --------------------------------------------------
if kh.is_included(b, bpp, bcharge):
fct_bbpp = 0
for a in statesdm[acharge]:
bppa = si.get_ind_dm1(bpp, a, acharge)
for l in range(nleads):
fct_bbpp += -Tba[l, bpp, a]*Tba[l, a, bp]*phi1fct[l, bppa, 1]
for c in statesdm[ccharge]:
cbpp = si.get_ind_dm1(c, bpp, bcharge)
for l in range(nleads):
fct_bbpp += -Tba[l, bpp, c]*Tba[l, c, bp]*phi1fct[l, cbpp, 0].conjugate()
kh.set_matrix_element(fct_bbpp, b, bp, bcharge, b, bpp, bcharge)
# --------------------------------------------------
for c, cp in itertools.product(statesdm[ccharge], statesdm[ccharge]):
if kh.is_included(c, cp, ccharge):
cpbp = si.get_ind_dm1(cp, bp, bcharge)
cb = si.get_ind_dm1(c, b, bcharge)
fct_ccp = 0
for l in range(nleads):
fct_ccp += (+ Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cpbp, 1]
- Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cb, 1].conjugate())
kh.set_matrix_element(fct_ccp, b, bp, bcharge, c, cp, ccharge)
# --------------------------------------------------
def generate_current(self):
E, Tba = self.qd.Ea, self.leads.Tba
phi1fct, phi1fct_energy = self.phi1fct, self.phi1fct_energy
si = self.si
ncharge, nleads, statesdm = si.ncharge, si.nleads, si.statesdm
phi1 = self.phi1
current = self.current
energy_current = self.energy_current
kh = self.kernel_handler
for charge in range(ncharge-1):
ccharge = charge+1
bcharge = charge
for c, b in itertools.product(statesdm[ccharge], statesdm[bcharge]):
cb = si.get_ind_dm1(c, b, bcharge)
for l in range(nleads):
current_l, energy_current_l = 0, 0
for bp in statesdm[bcharge]:
if not kh.is_included(bp, b, bcharge):
continue
phi0bpb = kh.get_phi0_element(bp, b, bcharge)
cbp = si.get_ind_dm1(c, bp, bcharge)
fct1 = phi1fct[l, cbp, 0]
fct1h = phi1fct_energy[l, cbp, 0]
phi1[l, cb] += Tba[l, c, bp]*phi0bpb*fct1
current_l += Tba[l, b, c]*Tba[l, c, bp]*phi0bpb*fct1
energy_current_l += Tba[l, b, c]*Tba[l, c, bp]*phi0bpb*fct1h
for cp in statesdm[ccharge]:
if not kh.is_included(c, cp, ccharge):
continue
phi0ccp = kh.get_phi0_element(c, cp, ccharge)
cpb = si.get_ind_dm1(cp, b, bcharge)
fct2 = phi1fct[l, cpb, 1]
fct2h = phi1fct_energy[l, cpb, 1]
phi1[l, cb] += Tba[l, cp, b]*phi0ccp*fct2
current_l += Tba[l, b, c]*phi0ccp*Tba[l, cp, b]*fct2
energy_current_l += Tba[l, b, c]*phi0ccp*Tba[l, cp, b]*fct2h
current[l] += -2*current_l.imag
energy_current[l] += -2*energy_current_l.imag
self.heat_current[:] = energy_current - current*self.leads.mulst
# ---------------------------------------------------------------------------------------------------
| bsd-2-clause | -8,804,896,393,798,070,000 | 43.507353 | 101 | 0.452999 | false |
invenia/Arbiter | setup.py | 1 | 1178 | """
to install:
python setup.py install
"""
from setuptools import setup
setup(
name="arbiter",
description="A task-dependency solver",
long_description=open('README.rst').read(),
version="0.4.0",
author="Brendan Curran-Johnson",
author_email="[email protected]",
license="MIT License",
url="https://github.com/invenia/Arbiter",
packages=(
"arbiter",
),
install_requires=(
'enum34',
'futures',
),
tests_require=(
'coverage',
'nose',
'python-coveralls',
),
classifiers=(
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
),
)
| mit | 2,089,709,813,790,238,000 | 24.06383 | 70 | 0.573854 | false |
ak15199/rop | art/panimage.py | 1 | 2671 | from ._baseclass import ArtBaseClass
import requests
try:
from thread import start_new_thread, allocate_lock
except ImportError:
from _thread import start_new_thread, allocate_lock
import logging
from opc.image import Image
def position(matrix, image):
x = 0
y = 0
while y < (image.height-matrix.height):
while x < image.width-matrix.width:
yield x, y
x += 1
dy = 0
while dy < matrix.height and y < (image.height-matrix.height):
yield x, y
y += 1
dy += 1
while x > 0 and y < (image.height-matrix.height):
yield x, y
x -= 1
dy = 0
while dy < matrix.height and y < (image.height-matrix.height):
yield x, y
y += 1
dy += 1
class Art(ArtBaseClass):
description = "Grab random images from the internet and pan over them"
def __init__(self, matrix, config):
w = matrix.width*16
h = matrix.height*16
self.url = "http://lorempixel.com/%s/%d/" % (w, h)
self.image_active = None
self._load()
def start(self, matrix):
matrix.clear()
def _load(self):
self.image_loaded = None
start_new_thread(Art._loadthread, (self,))
def _consume(self, matrix):
if not self.image_loaded:
return False
self.image_active = self.image_loaded
self._load()
self.position = position(matrix, self.image_active)
return True
def _loadthread(self):
logging.info("_loadthread begin")
try:
r = requests.get(self.url)
if r.status_code == 200:
self.image_loaded = Image(bytestream=r.content)
else:
logging.error("_loadthread code %d, using fallback"%r.status_code)
self.image_loaded = Image(filename="assets/images/lena.jpg")
except Exception as e:
logging.error("_loadthread exception '%s', using fallback"%str(e))
self.image_loaded = Image(filename="assets/images/lena.jpg")
def refresh(self, matrix):
if not self.image_active: # no image is active
if not self._consume(matrix): # can we get a fresh image?
return # return and re-try next cycle if still pending
try:
x, y = next(self.position)
        except StopIteration:
self.image_active = False # borked over image end
return # try and load new image next cycle
buf = self.image_active.translate(matrix, scale=1, x=x, y=y)
matrix.copyBuffer(buf)
def interval(self):
return 40
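# Minimal sketch (illustrative, not part of the original module; Dim is a
# hypothetical stand-in): position() yields a serpentine pan path over the
# image, e.g.
#   Dim = namedtuple("Dim", ["width", "height"])
#   list(position(Dim(2, 2), Dim(6, 6)))
# walks right along the top edge, steps down one matrix-height at the right
# edge, walks back left, and repeats until the bottom margin is reached.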
| gpl-3.0 | -6,688,805,793,518,084,000 | 25.979798 | 84 | 0.561587 | false |
nuagenetworks/vspk-python | vspk/v6/nuallredundancygroup.py | 1 | 24239 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUAllRedundancyGroup(NURESTObject):
""" Represents a AllRedundancyGroup in the VSD
Notes:
A read only API to get all redundancy gateway objects in the VSD environment. Use the ID field to then actually manage the redundancy gateway using the redundancy gateway API entity.
"""
__rest_name__ = "allredundancygroup"
__resource_name__ = "allredundancygroups"
## Constants
CONST_PERSONALITY_EVDFB = "EVDFB"
CONST_PERSONALITY_EVDF = "EVDF"
CONST_PERSONALITY_NUAGE_210_WBX_32_Q = "NUAGE_210_WBX_32_Q"
CONST_PERSONALITY_NSGDUC = "NSGDUC"
CONST_PERSONALITY_OTHER = "OTHER"
CONST_PERSONALITY_VDFG = "VDFG"
CONST_PERSONALITY_NSG = "NSG"
CONST_PERMITTED_ACTION_EXTEND = "EXTEND"
CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE"
CONST_PERSONALITY_DC7X50 = "DC7X50"
CONST_REDUNDANT_GATEWAY_STATUS_FAILED = "FAILED"
CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP"
CONST_PERSONALITY_VSA = "VSA"
CONST_PERMITTED_ACTION_USE = "USE"
CONST_PERSONALITY_VSG = "VSG"
CONST_PERMITTED_ACTION_READ = "READ"
CONST_PERSONALITY_VRSB = "VRSB"
CONST_REDUNDANT_GATEWAY_STATUS_SUCCESS = "SUCCESS"
CONST_PERSONALITY_NETCONF_7X50 = "NETCONF_7X50"
CONST_PERSONALITY_NUAGE_210_WBX_48_S = "NUAGE_210_WBX_48_S"
CONST_PERSONALITY_VRSG = "VRSG"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_PERSONALITY_NETCONF_THIRDPARTY_HW_VTEP = "NETCONF_THIRDPARTY_HW_VTEP"
CONST_PERMITTED_ACTION_ALL = "ALL"
CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY"
CONST_PERSONALITY_NSGBR = "NSGBR"
def __init__(self, **kwargs):
""" Initializes a AllRedundancyGroup instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> allredundancygroup = NUAllRedundancyGroup(id=u'xxxx-xxx-xxx-xxx', name=u'AllRedundancyGroup')
>>> allredundancygroup = NUAllRedundancyGroup(data=my_dict)
"""
super(NUAllRedundancyGroup, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._gateway_peer1_autodiscovered_gateway_id = None
self._gateway_peer1_connected = None
self._gateway_peer1_id = None
self._gateway_peer1_name = None
self._gateway_peer2_autodiscovered_gateway_id = None
self._gateway_peer2_connected = None
self._gateway_peer2_name = None
self._redundant_gateway_status = None
self._permitted_action = None
self._personality = None
self._description = None
self._embedded_metadata = None
self._enterprise_id = None
self._entity_scope = None
self._creation_date = None
self._vtep = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_peer1_autodiscovered_gateway_id", remote_name="gatewayPeer1AutodiscoveredGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_peer1_connected", remote_name="gatewayPeer1Connected", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_peer1_id", remote_name="gatewayPeer1ID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_peer1_name", remote_name="gatewayPeer1Name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_peer2_autodiscovered_gateway_id", remote_name="gatewayPeer2AutodiscoveredGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_peer2_connected", remote_name="gatewayPeer2Connected", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_peer2_name", remote_name="gatewayPeer2Name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="redundant_gateway_status", remote_name="redundantGatewayStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'FAILED', u'SUCCESS'])
self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])
self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'DC7X50', u'EVDF', u'EVDFB', u'HARDWARE_VTEP', u'NETCONF_7X50', u'NETCONF_THIRDPARTY_HW_VTEP', u'NSG', u'NSGBR', u'NSGDUC', u'NUAGE_210_WBX_32_Q', u'NUAGE_210_WBX_48_S', u'OTHER', u'VDFG', u'VRSB', u'VRSG', u'VSA', u'VSG'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
Name of the Redundancy Group
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the Redundancy Group
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def gateway_peer1_autodiscovered_gateway_id(self):
""" Get gateway_peer1_autodiscovered_gateway_id value.
Notes:
The Auto Discovered Gateway configuration owner in this Redundant Group.
This attribute is named `gatewayPeer1AutodiscoveredGatewayID` in VSD API.
"""
return self._gateway_peer1_autodiscovered_gateway_id
@gateway_peer1_autodiscovered_gateway_id.setter
def gateway_peer1_autodiscovered_gateway_id(self, value):
""" Set gateway_peer1_autodiscovered_gateway_id value.
Notes:
The Auto Discovered Gateway configuration owner in this Redundant Group.
This attribute is named `gatewayPeer1AutodiscoveredGatewayID` in VSD API.
"""
self._gateway_peer1_autodiscovered_gateway_id = value
@property
def gateway_peer1_connected(self):
""" Get gateway_peer1_connected value.
Notes:
Indicates status of the authoritative gateway of this Redundancy Group.
This attribute is named `gatewayPeer1Connected` in VSD API.
"""
return self._gateway_peer1_connected
@gateway_peer1_connected.setter
def gateway_peer1_connected(self, value):
""" Set gateway_peer1_connected value.
Notes:
Indicates status of the authoritative gateway of this Redundancy Group.
This attribute is named `gatewayPeer1Connected` in VSD API.
"""
self._gateway_peer1_connected = value
@property
def gateway_peer1_id(self):
""" Get gateway_peer1_id value.
Notes:
                The gateway configuration owner in this Redundant Group. When the Redundant Group is deleted, this gateway will receive vport associations
This attribute is named `gatewayPeer1ID` in VSD API.
"""
return self._gateway_peer1_id
@gateway_peer1_id.setter
def gateway_peer1_id(self, value):
""" Set gateway_peer1_id value.
Notes:
                The gateway configuration owner in this Redundant Group. When the Redundant Group is deleted, this gateway will receive vport associations
This attribute is named `gatewayPeer1ID` in VSD API.
"""
self._gateway_peer1_id = value
@property
def gateway_peer1_name(self):
""" Get gateway_peer1_name value.
Notes:
The gateway configuration owner name in this Redundant Group
This attribute is named `gatewayPeer1Name` in VSD API.
"""
return self._gateway_peer1_name
@gateway_peer1_name.setter
def gateway_peer1_name(self, value):
""" Set gateway_peer1_name value.
Notes:
The gateway configuration owner name in this Redundant Group
This attribute is named `gatewayPeer1Name` in VSD API.
"""
self._gateway_peer1_name = value
@property
def gateway_peer2_autodiscovered_gateway_id(self):
""" Get gateway_peer2_autodiscovered_gateway_id value.
Notes:
The Auto Discovered Gateway peer in this Redundant Group
This attribute is named `gatewayPeer2AutodiscoveredGatewayID` in VSD API.
"""
return self._gateway_peer2_autodiscovered_gateway_id
@gateway_peer2_autodiscovered_gateway_id.setter
def gateway_peer2_autodiscovered_gateway_id(self, value):
""" Set gateway_peer2_autodiscovered_gateway_id value.
Notes:
The Auto Discovered Gateway peer in this Redundant Group
This attribute is named `gatewayPeer2AutodiscoveredGatewayID` in VSD API.
"""
self._gateway_peer2_autodiscovered_gateway_id = value
@property
def gateway_peer2_connected(self):
""" Get gateway_peer2_connected value.
Notes:
Indicates status of the secondary gateway of this Redundancy Group.
This attribute is named `gatewayPeer2Connected` in VSD API.
"""
return self._gateway_peer2_connected
@gateway_peer2_connected.setter
def gateway_peer2_connected(self, value):
""" Set gateway_peer2_connected value.
Notes:
Indicates status of the secondary gateway of this Redundancy Group.
This attribute is named `gatewayPeer2Connected` in VSD API.
"""
self._gateway_peer2_connected = value
@property
def gateway_peer2_name(self):
""" Get gateway_peer2_name value.
Notes:
The gateway peer name in this Redundant Group
This attribute is named `gatewayPeer2Name` in VSD API.
"""
return self._gateway_peer2_name
@gateway_peer2_name.setter
def gateway_peer2_name(self, value):
""" Set gateway_peer2_name value.
Notes:
The gateway peer name in this Redundant Group
This attribute is named `gatewayPeer2Name` in VSD API.
"""
self._gateway_peer2_name = value
@property
def redundant_gateway_status(self):
""" Get redundant_gateway_status value.
Notes:
The status of Redundant Group, possible values are FAILED, SUCCESS Possible values are FAILED, SUCCESS, .
This attribute is named `redundantGatewayStatus` in VSD API.
"""
return self._redundant_gateway_status
@redundant_gateway_status.setter
def redundant_gateway_status(self, value):
""" Set redundant_gateway_status value.
Notes:
The status of Redundant Group, possible values are FAILED, SUCCESS Possible values are FAILED, SUCCESS, .
This attribute is named `redundantGatewayStatus` in VSD API.
"""
self._redundant_gateway_status = value
@property
def permitted_action(self):
""" Get permitted_action value.
Notes:
The permitted action to USE/EXTEND this Gateway Possible values are USE, READ, ALL, INSTANTIATE, EXTEND, DEPLOY, .
This attribute is named `permittedAction` in VSD API.
"""
return self._permitted_action
@permitted_action.setter
def permitted_action(self, value):
""" Set permitted_action value.
Notes:
The permitted action to USE/EXTEND this Gateway Possible values are USE, READ, ALL, INSTANTIATE, EXTEND, DEPLOY, .
This attribute is named `permittedAction` in VSD API.
"""
self._permitted_action = value
@property
def personality(self):
""" Get personality value.
Notes:
derived personality of the Redundancy Group - VSG,VRSG,NSG,OTHER Possible values are VSG, VSA, VRSG, VDFG, DC7X50, NSG, HARDWARE_VTEP, OTHER, .
"""
return self._personality
@personality.setter
def personality(self, value):
""" Set personality value.
Notes:
derived personality of the Redundancy Group - VSG,VRSG,NSG,OTHER Possible values are VSG, VSA, VRSG, VDFG, DC7X50, NSG, HARDWARE_VTEP, OTHER, .
"""
self._personality = value
@property
def description(self):
""" Get description value.
Notes:
Description of the Redundancy Group
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the Redundancy Group
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
The enterprise associated with this Redundant Group. This is a read only attribute
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
The enterprise associated with this Redundant Group. This is a read only attribute
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def vtep(self):
""" Get vtep value.
Notes:
Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address.
"""
return self._vtep
@vtep.setter
def vtep(self, value):
""" Set vtep value.
Notes:
Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address.
"""
self._vtep = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause | 8,053,294,178,348,660,000 | 31.890095 | 372 | 0.601551 | false |
waisuan/minblog_af | minblog2/helpers/databaseManager.py | 1 | 6726 | from pymongo import MongoClient
import time
from bson import ObjectId
class DatabaseManager:
# DB set-up
def __init__(self):
client = MongoClient()
db = client.flaskr
self.entries_col = db.entries
# sort by [default] descending/latest order
def get_all_entries(self, sort_by=-1):
all_entries = self.entries_col.find().sort([('_id', sort_by)])
entries_as_dict = [
dict(
entry_id = str(entry.get('_id', '9999')),
creator = entry.get('creator', '????'),
created_on_date = entry.get('created_on_date', '????'),
created_on_time = entry.get('created_on_time', '????'),
entry_title = entry.get('entry_title', '????'),
entry_text = entry.get('entry_text', '????'),
quick_text = entry.get('quick_text', entry.get('entry_text')),
modified_on_date = entry.get('modified_on_date', '????'),
modified_on_time = entry.get('modified_on_time', '????'),
is_modified = entry.get('is_modified', False)
) for entry in all_entries
]
return entries_as_dict
def get_entry_count(self):
all_entries = self.entries_col.find()
entries_as_list = [ entry for entry in all_entries ]
return len(entries_as_list)
def get_entries_by_page(self, direction, last_entry_id, limit, sort_by=-1):
if direction == '+':
direction = '$gt'
else:
direction = '$lt'
print direction, last_entry_id, sort_by
# $natural == natural order
curr_entries = self.entries_col.find({'_id': {direction: ObjectId(last_entry_id)}}).sort([('$natural', int(sort_by))]).limit(int(limit))
entries_as_dict = [
dict(
entry_id = str(entry.get('_id', '9999')),
creator = entry.get('creator', '????'),
created_on_date = entry.get('created_on_date', '????'),
created_on_time = entry.get('created_on_time', '????'),
entry_title = entry.get('entry_title', '????'),
entry_text = entry.get('entry_text', '????'),
quick_text = entry.get('quick_text', entry.get('entry_text')),
modified_on_date = entry.get('modified_on_date', '????'),
modified_on_time = entry.get('modified_on_time', '????'),
is_modified = entry.get('is_modified', False)
) for entry in curr_entries
]
return entries_as_dict
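    # Illustrative call (not in the original file): fetch the next page of 10
    # entries older than a known ObjectId string, newest first, e.g.
    #   mgr = DatabaseManager()
    #   page = mgr.get_entries_by_page('-', last_entry_id, limit=10, sort_by=-1)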
def get_entry_by_id(self, entry_id):
entry = self.entries_col.find_one({'_id': ObjectId(entry_id)})
if len(entry) == 0:
return {}
entry = dict(
entry_id = str(entry.get('_id', '9999')),
creator = entry.get('creator', '????'),
created_on_date = entry.get('created_on_date', '????'),
created_on_time = entry.get('created_on_time', '????'),
entry_title = entry.get('entry_title', '????'),
entry_text = entry.get('entry_text', '????'),
quick_text = entry.get('quick_text', entry.get('entry_text')),
modified_on_date = entry.get('modified_on_date', '????'),
modified_on_time = entry.get('modified_on_time', '????'),
is_modified = entry.get('is_modified', False)
)
return entry
def create_new_entry(self, newEntryTitle, newEntryText, newQuickText):
now_date = time.strftime("%d/%m/%Y")
now_time = time.strftime("%I:%M %p")
insert_result = self.entries_col.insert_one({
'creator' : 'admin',
'created_on_date' : now_date,
'created_on_time' : now_time,
'entry_title' : newEntryTitle,
'entry_text' : newEntryText,
'quick_text' : newQuickText,
'modified_on_date' : now_date,
'modified_on_time' : now_time,
'is_modified' : False
})
return str(insert_result.inserted_id) # Original _id type is ObjectId
def update_entry(self, entry_id, updatedEntryTitle, updatedEntryText, updatedQuickText):
now_date = time.strftime("%d/%m/%Y")
now_time = time.strftime("%I:%M %p")
update_result = self.entries_col.update_one({'_id': ObjectId(entry_id)},
{'$set': { 'entry_title' : updatedEntryTitle,
'entry_text' : updatedEntryText,
'quick_text' : updatedQuickText,
'modified_on_date' : now_date,
'modified_on_time' : now_time,
'is_modified' : True
}
})
return update_result.modified_count
def delete_entry(self, entry_id):
del_result = self.entries_col.delete_one({'_id': ObjectId(entry_id)})
return del_result.deleted_count
| mit | 3,785,670,568,057,706,500 | 56.487179 | 144 | 0.381505 | false |
haku86/happyowlweb | happyowlweb/happyowlweb/urls.py | 1 | 1074 | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name='index.html')),
# Examples:
# url(r'^$', 'happyowlweb.views.home', name='home'),
# url(r'^happyowlweb/', include('happyowlweb.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| mit | 1,547,584,701,885,924,400 | 33.645161 | 78 | 0.675978 | false |
voidfiles/extraction | extraction/tests/tests.py | 1 | 7513 | import unittest
import extraction
from extraction.tests.data import *
from extraction.examples.new_return_type import AddressExtractor
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.extractor = extraction.Extractor()
def test_rewriting_relative_urls(self):
"Test rewriting relative URLs as absolute URLs if source_url is specified."
self.extractor.techniques = ["extraction.examples.custom_technique.LethainComTechnique"]
# without source_url, stays as a relative URL
extracted = self.extractor.extract(LETHAIN_COM_HTML)
self.assertEqual(extracted.image, "/static/blog/digg_v4/initial_org.png")
# with source_url, should rewrite as an absolute path
extracted = self.extractor.extract(LETHAIN_COM_HTML, source_url="http://lethain.com/digg-v4-architecture-process/")
# rewrites /static/blog/digg_v4/initial_org.png
self.assertEqual(extracted.images[0], "http://lethain.com/static/blog/digg_v4/initial_org.png")
# rewrites ../digg_v4/initial_org.png
self.assertEqual(extracted.images[1], "http://lethain.com/digg_v4/initial_org.png")
def test_removing_duplicate_values(self):
"We shouldn't suggest the same extracted value multiple times."
extracted = self.extractor.extract(DUPLICATES_HTML)
self.assertEqual(extracted.titles, ["Hi"])
self.assertEqual(extracted.descriptions, ["This is awesome."])
def test_default_techniques(self):
"""
Test running the default techniques list with a simple page.
This is a bit of a high-level test to ensure that the default
techniques aren't completely broken.
"""
extracted = self.extractor.extract(LETHAIN_COM_HTML, source_url="http://lethain.com/digg-v4-architecture-process/")
self.assertTrue(extracted.titles)
self.assertTrue(extracted.urls)
self.assertTrue(extracted.descriptions)
self.assertTrue(extracted.feeds)
def test_default_techniques_on_empty_page(self):
"""
Test running the default techniques list against an empty HTML document.
This is useful for ensuring the defaut techniques fail sanely when they
encounter blank/empty documents.
"""
extracted = self.extractor.extract("")
self.assertFalse(extracted.titles)
self.assertFalse(extracted.urls)
self.assertFalse(extracted.descriptions)
self.assertFalse(extracted.feeds)
def test_technique_facebook_meta_tags(self):
        # check extraction of Facebook Opengraph meta tags
self.extractor.techniques = ["extraction.techniques.FacebookOpengraphTags"]
extracted = self.extractor.extract(FACEBOOK_HTML)
self.assertEqual(extracted.title, "The Rock")
self.assertEqual(extracted.titles, ["The Rock"])
self.assertEqual(extracted.url, "http://www.imdb.com/title/tt0117500/")
self.assertEqual(extracted.image, "http://ia.media-imdb.com/rock.jpg")
self.assertEqual(extracted.images, ["http://ia.media-imdb.com/rock.jpg"])
self.assertTrue(extracted.description, "A group of U.S. Marines, under command of a renegade general, take over Alcatraz and threaten San Francisco Bay with biological weapons.")
self.assertEqual(len(extracted.descriptions), 1)
def test_technique_head_tags(self):
"Test extracting page information from HTML head tags (meta, title, ...)."
self.extractor.techniques = ["extraction.techniques.HeadTags"]
extracted = self.extractor.extract(LETHAIN_COM_HTML, source_url="http://lethain.com/digg-v4-architecture-process/")
self.assertEqual(extracted.title, "Digg v4's Architecture and Development Processes - Irrational Exuberance")
self.assertEqual(extracted.url, "http://lethain.com/digg-v4-architecture-process/")
self.assertEqual(extracted.image, None)
self.assertEquals(extracted.description, "Will Larson's blog about programming and other things.")
self.assertEqual(extracted.feed, "http://lethain.com/feeds/")
self.assertEqual(extracted._unexpected_values['authors'], ["Will Larson"])
def test_technique_semantic_tags(self):
"Test extracting data from basic HTML tags like H1, H2, P, and IMG."
self.extractor.techniques = ["extraction.techniques.SemanticTags"]
extracted = self.extractor.extract(LETHAIN_COM_HTML)
self.assertEqual(extracted.title, "Irrational Exuberance")
self.assertEqual(extracted.url, None)
self.assertEqual(extracted.image, "/static/blog/digg_v4/initial_org.png")
self.assertEqual(len(extracted.images), 2)
self.assertEquals(extracted.description.split(), "A month ago history reset with the second launch of Digg v1 , and memories are starting to fade since much of the Digg team joined SocialCode four months ago, so it seemed like a good time to describe the system and team architecture which ran and developed Digg.com from May 2010 until May 2012.".split())
def test_technique_html_semantic_tags(self):
"Test extracting data from an HTML5 page."
self.extractor.techniques = ["extraction.techniques.HTML5SemanticTags"]
extracted = self.extractor.extract(HTML5_HTML)
self.assertEqual(extracted.title, 'This is a title')
self.assertEqual(extracted.description, 'This is a description.')
self.assertEqual(extracted._unexpected_values['videos'], ["this_is_a_video.mp4"])
def test_example_lethain_com_technique(self):
"Test extracting data from lethain.com with a custom technique in extraction.examples."
self.extractor.techniques = ["extraction.examples.custom_technique.LethainComTechnique"]
extracted = self.extractor.extract(LETHAIN_COM_HTML)
self.assertEqual(extracted.title, "Digg v4's Architecture and Development Processes")
self.assertEqual(extracted.url, None)
self.assertEqual(extracted._unexpected_values['tags'], [u'architecture', u'digg'])
self.assertEqual(extracted._unexpected_values['dates'], [u'08/19/2012'])
self.assertEqual(extracted.image, "/static/blog/digg_v4/initial_org.png")
self.assertEqual(len(extracted.images), 2)
self.assertEquals(extracted.description.split(), "A month ago history reset with the second launch of Digg v1 , and memories are starting to fade since much of the Digg team joined SocialCode four months ago, so it seemed like a good time to describe the system and team architecture which ran and developed Digg.com from May 2010 until May 2012.".split())
def test_example_new_return_type(self):
"Test returning a non-standard datatype, in this case addresses."
self.extractor = AddressExtractor()
self.extractor.techniques = ["extraction.examples.new_return_type.AddressTechnique"]
extracted = self.extractor.extract(WILLARSON_COM_HTML)
self.assertEqual(extracted.address, "Cole Valley San Francisco, CA USA")
self.assertEqual(extracted.url, None)
self.assertEqual(extracted.title, None)
self.assertEqual(extracted.description, None)
self.assertEqual(extracted.image, None)
def test_empty_title(self):
"Test that HTML with an empty title sets first h1 heading as title."
extracted = self.extractor.extract(EMPTY_TITLE_HTML)
self.assertEqual(extracted.title, "H1")
if __name__ == '__main__':
unittest.main()
| mit | 5,654,097,852,145,496,000 | 57.695313 | 364 | 0.710369 | false |
shanew/electrum | lib/paymentrequest.py | 1 | 14954 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import os.path
import re
import sys
import threading
import time
import traceback
import urlparse
import json
import requests
try:
import paymentrequest_pb2 as pb2
except ImportError:
sys.exit("Error: could not find paymentrequest_pb2.py. Create it with 'protoc --proto_path=lib/ --python_out=lib/ lib/paymentrequest.proto'")
import bitcoin
import util
from util import print_error
import transaction
import x509
import rsakey
REQUEST_HEADERS = {'Accept': 'application/bitcoin-paymentrequest', 'User-Agent': 'Electrum'}
ACK_HEADERS = {'Content-Type':'application/bitcoin-payment','Accept':'application/bitcoin-paymentack','User-Agent':'Electrum'}
ca_path = requests.certs.where()
ca_list, ca_keyID = x509.load_certificates(ca_path)
# status of payment requests
PR_UNPAID = 0
PR_EXPIRED = 1
PR_UNKNOWN = 2 # sent but not propagated
PR_PAID = 3 # send and propagated
PR_ERROR = 4 # could not parse
def get_payment_request(url):
u = urlparse.urlparse(url)
if u.scheme in ['http', 'https']:
response = requests.request('GET', url, headers=REQUEST_HEADERS)
data = response.content
print_error('fetched payment request', url, len(data))
elif u.scheme == 'file':
with open(u.path, 'r') as f:
data = f.read()
else:
raise BaseException("unknown scheme", url)
pr = PaymentRequest(data)
return pr
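# Usage sketch (illustrative, not part of the original module; the URL is a
# placeholder): fetch and verify a BIP70 payment request, e.g.
#   pr = get_payment_request('https://merchant.example/invoice')
#   if pr.verify(contacts):
#       outputs = pr.get_outputs()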
class PaymentRequest:
def __init__(self, data):
self.raw = data
self.parse(data)
self.requestor = None # known after verify
self.tx = None
def __str__(self):
return self.raw
def parse(self, r):
self.id = bitcoin.sha256(r)[0:16].encode('hex')
try:
self.data = pb2.PaymentRequest()
self.data.ParseFromString(r)
except:
self.error = "cannot parse payment request"
return
self.details = pb2.PaymentDetails()
self.details.ParseFromString(self.data.serialized_payment_details)
self.outputs = []
for o in self.details.outputs:
addr = transaction.get_address_from_output_script(o.script)[1]
self.outputs.append(('address', addr, o.amount))
self.memo = self.details.memo
self.payment_url = self.details.payment_url
def verify(self, contacts):
if not self.raw:
self.error = "Empty request"
return
pr = pb2.PaymentRequest()
pr.ParseFromString(self.raw)
if not pr.signature:
self.error = "No signature"
return
if pr.pki_type in ["x509+sha256", "x509+sha1"]:
return self.verify_x509(pr)
elif pr.pki_type in ["dnssec+btc", "dnssec+ecdsa"]:
return self.verify_dnssec(pr, contacts)
else:
self.error = "ERROR: Unsupported PKI Type for Message Signature"
return False
def verify_x509(self, paymntreq):
if not ca_list:
self.error = "Trusted certificate authorities list not found"
return False
cert = pb2.X509Certificates()
cert.ParseFromString(paymntreq.pki_data)
# verify the chain of certificates
try:
x, ca = verify_cert_chain(cert.certificate)
except BaseException as e:
self.error = str(e)
return False
# get requestor name
self.requestor = x.get_common_name()
if self.requestor.startswith('*.'):
self.requestor = self.requestor[2:]
# verify the BIP70 signature
pubkey0 = rsakey.RSAKey(x.modulus, x.exponent)
sig = paymntreq.signature
paymntreq.signature = ''
s = paymntreq.SerializeToString()
sigBytes = bytearray(sig)
msgBytes = bytearray(s)
if paymntreq.pki_type == "x509+sha256":
hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
verify = pubkey0.verify(sigBytes, x509.PREFIX_RSA_SHA256 + hashBytes)
elif paymntreq.pki_type == "x509+sha1":
verify = pubkey0.hashAndVerify(sigBytes, msgBytes)
if not verify:
self.error = "ERROR: Invalid Signature for Payment Request Data"
return False
### SIG Verified
self.error = 'Signed by Trusted CA: ' + ca.get_common_name()
return True
def verify_dnssec(self, pr, contacts):
sig = pr.signature
alias = pr.pki_data
info = contacts.resolve(alias)
if info.get('validated') is not True:
self.error = "Alias verification failed (DNSSEC)"
return False
if pr.pki_type == "dnssec+btc":
self.requestor = alias
address = info.get('address')
pr.signature = ''
message = pr.SerializeToString()
if bitcoin.verify_message(address, sig, message):
self.error = 'Verified with DNSSEC'
return True
else:
self.error = "verify failed"
return False
else:
self.error = "unknown algo"
return False
def has_expired(self):
return self.details.expires and self.details.expires < int(time.time())
def get_expiration_date(self):
return self.details.expires
def get_amount(self):
return sum(map(lambda x:x[2], self.outputs))
def get_requestor(self):
return self.requestor if self.requestor else 'unknown'
def get_verify_status(self):
return self.error
def get_memo(self):
return self.memo
def get_id(self):
return self.id
def get_outputs(self):
return self.outputs[:]
def send_ack(self, raw_tx, refund_addr):
pay_det = self.details
if not self.details.payment_url:
return False, "no url"
paymnt = pb2.Payment()
paymnt.merchant_data = pay_det.merchant_data
paymnt.transactions.append(raw_tx)
ref_out = paymnt.refund_to.add()
ref_out.script = transaction.Transaction.pay_script('address', refund_addr)
paymnt.memo = "Paid using Electrum"
pm = paymnt.SerializeToString()
payurl = urlparse.urlparse(pay_det.payment_url)
try:
r = requests.post(payurl.geturl(), data=pm, headers=ACK_HEADERS, verify=ca_path)
except requests.exceptions.SSLError:
print "Payment Message/PaymentACK verify Failed"
try:
r = requests.post(payurl.geturl(), data=pm, headers=ACK_HEADERS, verify=False)
except Exception as e:
print e
return False, "Payment Message/PaymentACK Failed"
if r.status_code >= 500:
return False, r.reason
try:
paymntack = pb2.PaymentACK()
paymntack.ParseFromString(r.content)
except Exception:
return False, "PaymentACK could not be processed. Payment was sent; please manually verify that payment was received."
print "PaymentACK message received: %s" % paymntack.memo
return True, paymntack.memo
def make_unsigned_request(req):
from transaction import Transaction
addr = req['address']
time = req.get('time', 0)
exp = req.get('exp', 0)
if time and type(time) != int:
time = 0
if exp and type(exp) != int:
exp = 0
amount = req['amount']
if amount is None:
amount = 0
memo = req['memo']
script = Transaction.pay_script('address', addr).decode('hex')
outputs = [(script, amount)]
pd = pb2.PaymentDetails()
for script, amount in outputs:
pd.outputs.add(amount=amount, script=script)
pd.time = time
pd.expires = time + exp if exp else 0
pd.memo = memo
pr = pb2.PaymentRequest()
pr.serialized_payment_details = pd.SerializeToString()
pr.signature = ''
return pr
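# Illustrative input (not in the original file): make_unsigned_request reads
# the keys below from a plain dict; the amount is assumed to be an integer
# number of satoshis, as elsewhere in BIP70.
#   req = {'address': '<bitcoin address>', 'amount': 100000,
#          'memo': 'example invoice', 'time': int(time.time()), 'exp': 3600}
#   pr = make_unsigned_request(req)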
def sign_request_with_alias(pr, alias, alias_privkey):
pr.pki_type = 'dnssec+btc'
pr.pki_data = str(alias)
message = pr.SerializeToString()
ec_key = bitcoin.regenerate_key(alias_privkey)
address = bitcoin.address_from_private_key(alias_privkey)
compressed = bitcoin.is_compressed(alias_privkey)
pr.signature = ec_key.sign_message(message, compressed, address)
def verify_cert_chain(chain):
""" Verify a chain of certificates. The last certificate is the CA"""
# parse the chain
cert_num = len(chain)
x509_chain = []
for i in range(cert_num):
x = x509.X509(bytearray(chain[i]))
x509_chain.append(x)
if i == 0:
x.check_date()
else:
if not x.check_ca():
raise BaseException("ERROR: Supplied CA Certificate Error")
if not cert_num > 1:
raise BaseException("ERROR: CA Certificate Chain Not Provided by Payment Processor")
# if the root CA is not supplied, add it to the chain
ca = x509_chain[cert_num-1]
if ca.getFingerprint() not in ca_list:
keyID = ca.get_issuer_keyID()
f = ca_keyID.get(keyID)
if f:
root = ca_list[f]
x509_chain.append(root)
else:
raise BaseException("Supplied CA Not Found in Trusted CA Store.")
# verify the chain of signatures
cert_num = len(x509_chain)
for i in range(1, cert_num):
x = x509_chain[i]
prev_x = x509_chain[i-1]
algo, sig, data = prev_x.get_signature()
sig = bytearray(sig)
pubkey = rsakey.RSAKey(x.modulus, x.exponent)
if algo == x509.ALGO_RSA_SHA1:
verify = pubkey.hashAndVerify(sig, data)
elif algo == x509.ALGO_RSA_SHA256:
hashBytes = bytearray(hashlib.sha256(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA256 + hashBytes)
elif algo == x509.ALGO_RSA_SHA384:
hashBytes = bytearray(hashlib.sha384(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA384 + hashBytes)
elif algo == x509.ALGO_RSA_SHA512:
hashBytes = bytearray(hashlib.sha512(data).digest())
verify = pubkey.verify(sig, x509.PREFIX_RSA_SHA512 + hashBytes)
else:
raise BaseException("Algorithm not supported")
util.print_error(self.error, algo.getComponentByName('algorithm'))
if not verify:
raise BaseException("Certificate not Signed by Provided CA Certificate Chain")
return x509_chain[0], ca
def check_ssl_config(config):
import pem
key_path = config.get('ssl_privkey')
cert_path = config.get('ssl_chain')
with open(key_path, 'r') as f:
params = pem.parse_private_key(f.read())
with open(cert_path, 'r') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
# verify chain
x, ca = verify_cert_chain(bList)
# verify that privkey and pubkey match
privkey = rsakey.RSAKey(*params)
pubkey = rsakey.RSAKey(x.modulus, x.exponent)
assert x.modulus == params[0]
assert x.exponent == params[1]
# return requestor
requestor = x.get_common_name()
if requestor.startswith('*.'):
requestor = requestor[2:]
return requestor
def sign_request_with_x509(pr, key_path, cert_path):
import pem
with open(key_path, 'r') as f:
params = pem.parse_private_key(f.read())
privkey = rsakey.RSAKey(*params)
with open(cert_path, 'r') as f:
s = f.read()
bList = pem.dePemList(s, "CERTIFICATE")
certificates = pb2.X509Certificates()
certificates.certificate.extend(map(str, bList))
pr.pki_type = 'x509+sha256'
pr.pki_data = certificates.SerializeToString()
msgBytes = bytearray(pr.SerializeToString())
hashBytes = bytearray(hashlib.sha256(msgBytes).digest())
sig = privkey.sign(x509.PREFIX_RSA_SHA256 + hashBytes)
pr.signature = bytes(sig)
def serialize_request(req):
pr = make_unsigned_request(req)
signature = req.get('sig')
requestor = req.get('name')
if requestor and signature:
pr.signature = signature.decode('hex')
pr.pki_type = 'dnssec+btc'
pr.pki_data = str(requestor)
return pr
def make_request(config, req):
pr = make_unsigned_request(req)
key_path = config.get('ssl_privkey')
cert_path = config.get('ssl_chain')
if key_path and cert_path:
sign_request_with_x509(pr, key_path, cert_path)
return pr
class InvoiceStore(object):
def __init__(self, config):
self.config = config
self.invoices = {}
self.load_invoices()
def load_invoices(self):
path = os.path.join(self.config.path, 'invoices')
try:
with open(path, 'r') as f:
d = json.loads(f.read())
except:
return
for k, v in d.items():
try:
pr = PaymentRequest(v.get('hex').decode('hex'))
pr.tx = v.get('txid')
pr.requestor = v.get('requestor')
self.invoices[k] = pr
except:
continue
def save(self):
l = {}
for k, pr in self.invoices.items():
l[k] = {
'hex': str(pr).encode('hex'),
'requestor': pr.get_requestor(),
'txid': pr.tx
}
path = os.path.join(self.config.path, 'invoices')
with open(path, 'w') as f:
s = json.dumps(l, indent=4, sort_keys=True)
r = f.write(s)
def get_status(self, key):
pr = self.get(key)
if pr.tx is not None:
return PR_PAID
if pr.has_expired():
return PR_EXPIRED
return PR_UNPAID
def add(self, pr):
key = pr.get_id()
if key in self.invoices:
print_error('invoice already in list')
return key
self.invoices[key] = pr
self.save()
return key
def remove(self, key):
self.invoices.pop(key)
self.save()
def get(self, k):
return self.invoices.get(k)
def set_paid(self, key, tx_hash):
self.invoices[key].tx = tx_hash
self.save()
def sorted_list(self):
# sort
return self.invoices.values()
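# Usage sketch (illustrative, not part of the original module):
#   store = InvoiceStore(config)
#   key = store.add(pr)           # persists the PaymentRequest
#   store.set_paid(key, tx_hash)  # records the paying transaction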
| gpl-3.0 | 809,697,329,757,662,100 | 31.438178 | 145 | 0.604922 | false |
bgroff/kala-app | django_kala/api/basecamp_classic/projects/serializers.py | 1 | 1207 | from rest_framework import serializers
from projects.models import Project, Category
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Project
fields = [
'id', 'name', 'organization'
]
def create(self, validated_data):
return Project.objects.create(**validated_data)
def validate_name(self, value):
# At least try to dedup names
if Project.objects.filter(name__iexact=value):
raise serializers.ValidationError('Name is already in use.')
return value
class CategorySerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
self.project = kwargs.pop('project')
super(CategorySerializer, self).__init__(*args, **kwargs)
class Meta:
model = Category
fields = [
'id', 'name', 'project'
]
def validate_project(self, project):
if self.project.pk != project.pk:
raise serializers.ValidationError('The project primary key cannot be different from the current project')
return project
def create(self, validated_data):
return Category.objects.create(**validated_data)
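# Usage sketch (illustrative, not part of the original module): the serializer
# requires the current project as an extra constructor kwarg, e.g.
#   serializer = CategorySerializer(data=request.data, project=project)
#   if serializer.is_valid():
#       serializer.save()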
| mit | -3,842,473,095,268,577,300 | 29.948718 | 117 | 0.641259 | false |
jsheedy/worldmap-dynamic-zoom | server/db.py | 1 | 1671 | import contextlib
import logging
import time
import psycopg2
import psycopg2.extras as extras
import psycopg2.pool as pool
import config
import queries
connection_pool = pool.ThreadedConnectionPool(config.MINCONN, config.MAXCONN, database=config.DB['DB'], user=config.DB['USER'], password=config.DB['PASSWORD'])
@contextlib.contextmanager
def get_cursor():
    conn = None
    try:
for i in range(5): # try a few times
try:
conn = connection_pool.getconn()
logging.debug('got connection. rused: {}'.format(connection_pool._rused))
# cursor = conn.cursor(cursor_factory=extras.DictCursor)
cursor = conn.cursor()
yield cursor
break
except pool.PoolError:
logging.debug('Missed pool connection on try {}'.format(i))
time.sleep(i/5.0)
else:
raise pool.PoolError('connection pool full')
except Exception as e:
if conn:
logging.exception("Closing with rollback")
conn.rollback()
connection_pool.putconn(conn, close=True)
raise
else:
conn.commit()
if conn:
connection_pool.putconn(conn)
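# Usage sketch (illustrative, not part of the original module): the context
# manager yields a pooled cursor, commits on clean exit, and rolls back and
# closes the connection on error, e.g.
#   with get_cursor() as cursor:
#       cursor.execute("SELECT 1")
#       row = cursor.fetchone()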
def country_list(bbox=None):
with get_cursor() as cursor:
query = queries.db.country_list(bbox)
cursor.execute(query)
yield from cursor
zoom_levels = [(10.0/x**4) for x in range(1, 20)]
def country(id=None, zoom=1):
tolerance = zoom_levels[zoom]
with get_cursor() as cursor:
query = queries.db.country()
cursor.execute(query, [tolerance, id])
return cursor.fetchone()
| unlicense | -171,222,791,807,689,100 | 28.315789 | 159 | 0.608019 | false |
Wolfterro/SVD | src/old/1.0/GlobalVars.py | 1 | 1651 | # -*- coding: utf-8 -*-
'''
The MIT License (MIT)
Copyright (c) 2017 Wolfgang Almeida <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Global variables class
# ======================
class GlobalVars:
    # Global variables
    # ----------------
Version = "1.0"
Ui = None
MainWindow = None
IconName = "Icon.ico"
IconPath = "Icon.ico"
SaveFolder = "SVD"
BinFolder = "bin"
Youtube_dl = ""
    PossibleSaveOptions = ["MP3 (Conversion)", "WAV (Conversion)", "MP4",
    "WEBM", "MKV", "3GP", "MP4 (Conversion)", "WEBM (Conversion)", "MKV (Conversion)"]
    AudioFormats = ["mp3", "wav"]
| mit | -2,166,897,040,673,686,500 | 34.577778 | 80 | 0.707421 | false |
UUDigitalHumanitieslab/timealign | annotations/views.py | 1 | 25800 | import os
from collections import defaultdict
from tempfile import NamedTemporaryFile
from lxml import etree
from django.contrib import messages
from django.contrib.admin.utils import construct_change_message
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import Count, Prefetch, QuerySet
from django.http import HttpResponse, JsonResponse, QueryDict
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.http import urlquote
from django.views import generic
from django_filters.views import FilterView
from reversion.models import Version
from reversion.revisions import add_to_revision, set_comment
from reversion.views import RevisionMixin
from core.mixins import ImportMixin, CheckOwnerOrStaff, FluidMixin, SuperuserRequiredMixin
from core.utils import find_in_enum, XLSX
from .exports import export_annotations
from .filters import AnnotationFilter
from .forms import AnnotationForm, LabelImportForm, AddFragmentsForm, FragmentForm
from .mixins import PrepareDownloadMixin, SelectSegmentMixin, ImportFragmentsMixin
from .models import Corpus, SubCorpus, Document, Language, Fragment, Alignment, Annotation, \
TenseCategory, Tense, Source, Sentence, Word, LabelKey
from .utils import get_next_alignment, get_available_corpora, get_xml_sentences, bind_annotations_to_xml, \
natural_sort_key
##############
# Static views
##############
class IntroductionView(generic.TemplateView):
"""
Loads a static introduction view.
"""
template_name = 'annotations/introduction.html'
class InstructionsView(generic.TemplateView):
"""
Loads the various steps of the instructions.
"""
def get_template_names(self):
return 'annotations/instructions{}.html'.format(self.kwargs['n'])
def get_context_data(self, **kwargs):
context = super(InstructionsView, self).get_context_data(**kwargs)
context['is_no_target_title'] = Annotation._meta.get_field('is_no_target').verbose_name.format(
'present perfect')
context['is_translation_title'] = Annotation._meta.get_field('is_translation').verbose_name
return context
class StatusView(PermissionRequiredMixin, generic.TemplateView):
"""
Loads a static home view, with an overview of the annotation progress.
"""
template_name = 'annotations/home.html'
permission_required = 'annotations.change_annotation'
def get_context_data(self, **kwargs):
"""Creates a list of tuples with information on the annotation progress."""
context = super(StatusView, self).get_context_data(**kwargs)
corpus_pk = self.kwargs.get('pk', None)
if corpus_pk:
corpora = [get_object_or_404(Corpus, pk=corpus_pk)]
else:
corpora = get_available_corpora(self.request.user)
# Retrieve the totals per language pair
languages = {language.pk: language for language in Language.objects.all()}
alignments = Alignment.objects.filter(original_fragment__document__corpus__in=corpora)
totals = alignments \
.values('original_fragment__language', 'translated_fragment__language') \
.order_by('original_fragment__language', 'translated_fragment__language') \
.annotate(count=Count('pk'))
completed = {(t.get('original_fragment__language'), t.get('translated_fragment__language')): t.get('count')
for t in totals.exclude(annotation=None)}
# Convert the QuerySets into a list of tuples
language_totals = []
for total in totals:
l1 = languages.get(total['original_fragment__language'])
l2 = languages.get(total['translated_fragment__language'])
complete = completed.get((l1.pk, l2.pk), 0)
available = total['count']
language_totals.append((l1, l2, complete, available))
context['languages'] = language_totals
context['corpus_pk'] = corpus_pk
context['current_corpora'] = corpora
return context
#################
# CRUD Annotation
#################
class AnnotationMixin(SelectSegmentMixin, SuccessMessageMixin, PermissionRequiredMixin):
model = Annotation
form_class = AnnotationForm
permission_required = 'annotations.change_annotation'
def __init__(self):
"""Creates an attribute to cache the Alignment."""
super(AnnotationMixin, self).__init__()
self.alignment = None
def get_form_kwargs(self):
"""Sets the User and the Alignment as a form kwarg."""
kwargs = super(AnnotationMixin, self).get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['alignment'] = self.get_alignment()
kwargs['select_segment'] = self.request.session.get('select_segment', False)
return kwargs
def get_context_data(self, **kwargs):
"""Sets the Alignment on the context."""
context = super(AnnotationMixin, self).get_context_data(**kwargs)
context['alignment'] = self.get_alignment()
return context
def get_alignment(self):
raise NotImplementedError
def get_alignments(self):
"""Retrieve related fields on Alignment to prevent extra queries."""
return Alignment.objects \
.select_related('original_fragment__document__corpus',
'translated_fragment__document__corpus') \
.prefetch_related('original_fragment__sentence_set__word_set',
'translated_fragment__sentence_set__word_set')
class RevisionWithCommentMixin(RevisionMixin):
revision_manage_manually = True
def form_valid(self, form):
result = super().form_valid(form)
if form.changed_data:
add_to_revision(self.object)
set_comment(self.format_change_comment(form.changed_data, form.cleaned_data))
return result
def format_change_for_field(self, field, value):
if isinstance(value, QuerySet):
value = ', '.join(map(str, value))
return '{} to "{}"'.format(field, value)
def format_change_comment(self, changes, values):
parts = []
for change in changes:
parts.append(self.format_change_for_field(change, values[change]))
return 'Changed {}'.format(', '.join(parts))
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['revisions'] = Version.objects.get_for_object(self.object)
return context
class RevisionCreateMixin(RevisionMixin):
def form_valid(self, form):
set_comment('Created annotation')
return super().form_valid(form)
class AnnotationUpdateMixin(AnnotationMixin, CheckOwnerOrStaff, RevisionWithCommentMixin):
def get_context_data(self, **kwargs):
"""Sets the annotated Words on the context."""
context = super(AnnotationUpdateMixin, self).get_context_data(**kwargs)
context['annotated_words'] = self.object.words.all()
return context
def get_success_url(self):
"""Returns to the overview per language."""
alignment = self.get_alignment()
l1 = alignment.original_fragment.language.iso
l2 = alignment.translated_fragment.language.iso
return reverse('annotations:list', args=(l1, l2,))
def get_alignment(self):
"""Retrieves the Alignment from the object."""
if not self.alignment:
self.alignment = self.get_alignments().get(pk=self.object.alignment.pk)
return self.alignment
class AnnotationCreate(AnnotationMixin, RevisionCreateMixin, generic.CreateView):
success_message = 'Annotation created successfully'
def get_success_url(self):
"""Go to the choose-view to select a new Alignment."""
alignment = self.object.alignment
return reverse('annotations:choose', args=(alignment.original_fragment.document.corpus.pk,
alignment.original_fragment.language.iso,
alignment.translated_fragment.language.iso))
def form_valid(self, form):
"""Sets the User and Alignment on the created instance."""
form.instance.annotated_by = self.request.user
form.instance.alignment = self.get_alignment()
return super(AnnotationCreate, self).form_valid(form)
def get_alignment(self):
"""Retrieves the Alignment by the pk in the kwargs."""
if not self.alignment:
self.alignment = get_object_or_404(self.get_alignments(), pk=self.kwargs['pk'])
return self.alignment
class AnnotationUpdate(AnnotationUpdateMixin, generic.UpdateView):
success_message = 'Annotation edited successfully'
def form_valid(self, form):
"""Sets the last modified by on the instance."""
form.instance.last_modified_by = self.request.user
return super(AnnotationUpdate, self).form_valid(form)
class AnnotationDelete(AnnotationUpdateMixin, generic.DeleteView):
success_message = 'Annotation deleted successfully'
class AnnotationChoose(PermissionRequiredMixin, generic.RedirectView):
permanent = False
pattern_name = 'annotations:create'
permission_required = 'annotations.change_annotation'
def get_redirect_url(self, *args, **kwargs):
"""Redirects to the next open Alignment."""
l1 = Language.objects.get(iso=self.kwargs['l1'])
l2 = Language.objects.get(iso=self.kwargs['l2'])
corpus = Corpus.objects.get(pk=int(self.kwargs['corpus'])) if 'corpus' in self.kwargs else None
next_alignment = get_next_alignment(self.request.user, l1, l2, corpus)
# If no next Alignment has been found, redirect to the status overview
if not next_alignment:
messages.success(self.request, 'All work is done for this language pair!')
return reverse('annotations:status')
corpus_pk = next_alignment.original_fragment.document.corpus.pk
return super().get_redirect_url(corpus_pk, next_alignment.pk)
############
# CRUD Fragment
############
class FragmentDetailMixin(LoginRequiredMixin):
model = Fragment
def get_object(self, queryset=None):
qs = Fragment.objects \
.select_related('document__corpus', 'language', 'tense') \
.prefetch_related('original', 'sentence_set__word_set')
fragment = super().get_object(qs)
return fragment
class FragmentDetail(FragmentDetailMixin, generic.DetailView):
def get_context_data(self, **kwargs):
context = super(FragmentDetail, self).get_context_data(**kwargs)
fragment = self.object
limit = 5 # TODO: magic number
doc_sentences = get_xml_sentences(fragment, limit)
context['sentences'] = doc_sentences or fragment.sentence_set.all()
context['limit'] = limit
return context
class FragmentDetailPlain(FragmentDetailMixin, generic.DetailView):
template_name = 'annotations/fragment_detail_plain.html'
class FragmentRevisionWithCommentMixin(RevisionWithCommentMixin):
def format_change_for_field(self, field, value):
if field == 'formal_structure':
return 'formal structure to ' + find_in_enum(value, Fragment.FORMAL_STRUCTURES)
if field == 'sentence_function':
return 'sentence function to ' + find_in_enum(value, Fragment.SENTENCE_FUNCTIONS)
return super().format_change_for_field(field, value)
class FragmentEdit(SelectSegmentMixin, LoginRequiredMixin, FragmentRevisionWithCommentMixin, generic.UpdateView):
model = Fragment
form_class = FragmentForm
def get_context_data(self, **kwargs):
"""Sets the annotated Words on the context."""
context = super(FragmentEdit, self).get_context_data(**kwargs)
context['annotated_words'] = self.object.targets()
return context
def get_success_url(self):
return reverse('annotations:show', args=(self.object.pk,))
def form_valid(self, form):
"""Updates the target words."""
for word in Word.objects.filter(sentence__fragment=self.object):
word.is_target = word in form.cleaned_data['words']
word.save()
return super(FragmentEdit, self).form_valid(form)
############
# CRUD Corpus
############
class CorpusList(LoginRequiredMixin, generic.ListView):
model = Corpus
context_object_name = 'corpora'
ordering = 'title'
class CorpusDetail(LoginRequiredMixin, generic.DetailView):
model = Corpus
def get_context_data(self, **kwargs):
context = super(CorpusDetail, self).get_context_data(**kwargs)
# Retrieve all Documents and order them by title
corpus = self.object
documents = {d.pk: d.title for d in corpus.documents.all()}
documents_sorted = sorted(list(documents.items()), key=lambda x: natural_sort_key(x[1]))
document_pks = [d[0] for d in documents_sorted]
# Create a list of Languages
languages = defaultdict(list)
for language in corpus.languages.all():
languages[language.title] = [None] * len(document_pks)
# Retrieve the number of Annotations per document
by_document = Annotation.objects. \
filter(alignment__translated_fragment__document__corpus=corpus). \
values('alignment__translated_fragment__language__title',
'alignment__translated_fragment__document__pk'). \
annotate(Count('pk'))
# Wrap the number of Annotations into the list of Languages
for d in by_document:
language = d.get('alignment__translated_fragment__language__title')
document_pk = d.get('alignment__translated_fragment__document__pk')
            # Sanity check: Annotations may exist for a language that is not
            # defined as a Corpus language; skip those here.
if languages.get(language):
index = document_pks.index(document_pk)
languages[language][index] = d.get('pk__count')
# And finally, append the list of Document and Languages to the context
context['documents'] = documents_sorted
context['languages'] = dict(languages)
return context
############
# CRUD Document
############
class DocumentDetail(LoginRequiredMixin, generic.DetailView):
model = Document
############
# CRUD Source
############
class SourceDetail(LoginRequiredMixin, generic.DetailView):
model = Source
def get_object(self, queryset=None):
qs = Source.objects.select_related('document__corpus', 'language')
source = super(SourceDetail, self).get_object(qs)
return source
def get_context_data(self, **kwargs):
context = super(SourceDetail, self).get_context_data(**kwargs)
source = self.object
tree, failed_lookups = bind_annotations_to_xml(source)
additional_sources = Source.objects \
.filter(document=source.document) \
.exclude(pk=source.pk) \
.select_related('language')
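        # Render each <p>/<head> element of the annotated XML tree to HTML
        # through the XSLT stylesheet annotations/xml_transform.xslt.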
transform = etree.XSLT(etree.fromstring(render_to_string('annotations/xml_transform.xslt').encode('utf-8')))
context['sentences'] = [transform(p) for p in tree.iter('p', 'head')]
context['failed_lookups'] = failed_lookups
context['additional_sources'] = additional_sources
context['rows'] = [(x,) for x in context['sentences']]
additional_source = self.request.GET.get('additional_source')
if additional_source:
source = get_object_or_404(Source, pk=additional_source)
add_tree, add_failed_lookups = bind_annotations_to_xml(source)
context['additional_source'] = source
context['additional_sentences'] = [transform(p) for p in add_tree.iter('p', 'head')]
            # list.extend() returns None, so do not reassign the context value
            context['failed_lookups'].extend(add_failed_lookups)
context['rows'] = zip(context['sentences'], context['additional_sentences'])
return context
############
# List views
############
class AnnotationList(PermissionRequiredMixin, FluidMixin, FilterView):
context_object_name = 'annotations'
filterset_class = AnnotationFilter
paginate_by = 15
permission_required = 'annotations.change_annotation'
def get_queryset(self):
"""
Retrieves all Annotations for the given source (l1) and target (l2) language.
:return: A QuerySet of Annotations.
"""
target_words = Sentence.objects. \
prefetch_related(Prefetch('word_set', queryset=Word.objects.filter(is_target=True)))
return Annotation.objects \
.filter(alignment__original_fragment__language__iso=self.kwargs['l1']) \
.filter(alignment__translated_fragment__language__iso=self.kwargs['l2']) \
.filter(alignment__original_fragment__document__corpus__in=get_available_corpora(self.request.user)) \
.select_related('annotated_by',
'tense',
'alignment__original_fragment',
'alignment__original_fragment__document',
'alignment__original_fragment__tense',
'alignment__translated_fragment') \
.prefetch_related('alignment__original_fragment__sentence_set__word_set',
Prefetch('alignment__original_fragment__sentence_set', queryset=target_words,
to_attr='targets_prefetched'),
'alignment__translated_fragment__sentence_set__word_set',
'alignment__original_fragment__labels',
'labels',
'words') \
.order_by('-annotated_at')
def get_filterset(self, filterset_class):
kwargs = self.get_filterset_kwargs(filterset_class)
request = kwargs['request']
l1, l2 = request.resolver_match.kwargs['l1'], request.resolver_match.kwargs['l2']
session_key = 'annotation_filter_{}_{}'.format(l1, l2)
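        # Persist the last-used filter per language pair in the session, so
        # revisiting this list restores the previous filter settings.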
if kwargs['data']:
request.session[session_key] = kwargs['data'].urlencode()
elif session_key in request.session:
kwargs['data'] = QueryDict(request.session[session_key])
return filterset_class(l1, l2, **kwargs)
class FragmentList(PermissionRequiredMixin, generic.ListView):
"""
TODO: consider refactoring, too many queries.
"""
context_object_name = 'fragments'
template_name = 'annotations/fragment_list.html'
paginate_by = 25
permission_required = 'annotations.change_annotation'
def get_queryset(self):
"""
Retrieves all Fragments for the given language that have an Annotation that contains a target expression.
:return: A list of Fragments.
"""
results = []
fragments = Fragment.objects.filter(language__iso=self.kwargs['language']) \
.filter(document__corpus__in=get_available_corpora(self.request.user))
for fragment in fragments:
if Annotation.objects.filter(alignment__original_fragment=fragment, is_no_target=False).exists():
results.append(fragment)
if len(results) == 50: # TODO: Capping this for now with a magic number.
break
return results
def get_context_data(self, **kwargs):
"""
Sets the current language and other_languages on the context.
:param kwargs: Contains the current language.
:return: The context variables.
"""
context = super(FragmentList, self).get_context_data(**kwargs)
language = self.kwargs['language']
corpus = context['fragments'][0].document.corpus
context['language'] = Language.objects.filter(iso=language)
context['other_languages'] = corpus.languages.exclude(iso=language)
context['show_tenses'] = self.kwargs.get('showtenses', False)
return context
class TenseCategoryList(PermissionRequiredMixin, FluidMixin, generic.ListView):
model = TenseCategory
context_object_name = 'tensecategories'
template_name = 'annotations/tenses.html'
permission_required = 'annotations.change_annotation'
def get_context_data(self, **kwargs):
"""
Sets the tenses and languages on the context.
:return: The context variables.
"""
context = super(TenseCategoryList, self).get_context_data(**kwargs)
tense_cache = {(t.category.title, t.language.iso): t.title for t in
Tense.objects.select_related('category', 'language')}
tense_categories = TenseCategory.objects.all()
tenses = defaultdict(list)
languages = []
for language in Language.objects.order_by('iso'):
            if not Tense.objects.filter(language=language).exists():
continue
languages.append(language)
for tc in tense_categories:
tense = tense_cache.get((tc.title, language.iso), '')
tenses[tc].append(tense)
context['tenses'] = sorted(list(tenses.items()), key=lambda item: item[0].pk)
context['languages'] = languages
return context
class LabelList(PermissionRequiredMixin, FluidMixin, generic.ListView):
model = LabelKey
context_object_name = 'labelkeys'
template_name = 'annotations/labels.html'
permission_required = 'annotations.change_annotation'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
corpus = self.kwargs.get('corpus')
if corpus:
corpus = Corpus.objects.get(pk=corpus)
else:
corpus = get_available_corpora(self.request.user)[0]
self.object_list = self.object_list.filter(corpora=corpus)
context['label_keys'] = self.object_list
labels = [key.labels.all() for key in self.object_list]
        # Transpose the 2D array stored in labels so that each label key
        # is shown in a column of the HTML table.
transposed = []
max_len = max([len(x) for x in labels]) if labels else 0
for i in range(max_len):
transposed.append([])
for group in labels:
if len(group) > i:
transposed[-1].append(group[i])
else:
# add empty table cells
transposed[-1].append('')
context['labels'] = transposed
context['corpus'] = corpus
context['corpora'] = get_available_corpora(self.request.user)
return context
##############
# Export views
##############
class PrepareDownload(PrepareDownloadMixin, generic.TemplateView):
template_name = 'annotations/download.html'
class ExportPOSPrepare(PermissionRequiredMixin, generic.View):
permission_required = 'annotations.change_annotation'
def get(self, request, *args, **kwargs):
language = self.request.GET['language']
corpus_id = self.request.GET['corpus']
subcorpus_id = self.request.GET['subcorpus']
document_id = self.request.GET['document']
include_non_targets = 'include_non_targets' in self.request.GET
add_lemmata = 'add_lemmata' in self.request.GET
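        # Write the export to a temp file that outlives this request
        # (delete=False); the download view looks it up via the session.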
pos_file = NamedTemporaryFile(delete=False)
self.request.session['pos_file'] = pos_file.name
corpus = Corpus.objects.get(pk=int(corpus_id))
subcorpus = SubCorpus.objects.get(pk=int(subcorpus_id)) if subcorpus_id != 'all' else None
document = Document.objects.get(pk=int(document_id)) if document_id != 'all' else None
document_title = document.title if document_id != 'all' else 'all'
filename = '{}-{}-{}.xlsx'.format(urlquote(corpus.title), urlquote(document_title), language)
self.request.session['pos_filename'] = filename
export_annotations(pos_file.name, XLSX, corpus, language,
subcorpus=subcorpus, document=document,
include_non_targets=include_non_targets, add_lemmata=add_lemmata)
return JsonResponse(dict(done=True))
class ExportPOSDownload(PermissionRequiredMixin, generic.View):
permission_required = 'annotations.change_annotation'
def get(self, request, *args, **kwargs):
pos_file = self.request.session['pos_file']
pos_filename = self.request.session['pos_filename']
with open(pos_file, 'rb') as f:
contents = f.read()
os.unlink(pos_file)
response = HttpResponse(contents, content_type='application/xlsx')
response['Content-Disposition'] = 'attachment; filename={}'.format(pos_filename)
return response
##############
# Import views
##############
class ImportLabelsView(SuperuserRequiredMixin, ImportMixin):
"""
Allows superusers to import labels to Annotations and Fragments.
"""
form_class = LabelImportForm
template_name = 'annotations/label_form.html'
success_message = 'Successfully imported the labels!'
def get_success_url(self):
return reverse('annotations:import-labels')
class AddFragmentsView(SuperuserRequiredMixin, ImportFragmentsMixin):
"""
Allows superusers to import Fragments.
"""
form_class = AddFragmentsForm
template_name = 'annotations/add_fragments_form.html'
success_message = 'Successfully added the fragments!'
def get_success_url(self):
return reverse('annotations:add-fragments')
| mit | 3,640,639,079,216,742,400 | 38.389313 | 116 | 0.647868 | false |
Yelp/paasta | paasta_tools/paastaapi/model/marathon_mesos_nonrunning_task.py | 1 | 7535 | # coding: utf-8
"""
Paasta API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from paasta_tools.paastaapi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from paasta_tools.paastaapi.model.task_tail_lines import TaskTailLines
globals()['TaskTailLines'] = TaskTailLines
class MarathonMesosNonrunningTask(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'deployed_timestamp': (float,), # noqa: E501
'hostname': (str,), # noqa: E501
'id': (str,), # noqa: E501
'state': (str,), # noqa: E501
'tail_lines': (TaskTailLines,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'deployed_timestamp': 'deployed_timestamp', # noqa: E501
'hostname': 'hostname', # noqa: E501
'id': 'id', # noqa: E501
'state': 'state', # noqa: E501
'tail_lines': 'tail_lines', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""MarathonMesosNonrunningTask - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
deployed_timestamp (float): The unix timestamp at which the task was deployed. [optional] # noqa: E501
hostname (str): Name of the Mesos agent on which this task is running. [optional] # noqa: E501
id (str): The ID of the task in Mesos. [optional] # noqa: E501
state (str): The current state of the task. [optional] # noqa: E501
tail_lines (TaskTailLines): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| apache-2.0 | 6,798,839,742,654,500,000 | 39.294118 | 124 | 0.563769 | false |
shubhdev/edx-platform | lms/djangoapps/certificates/tests/test_views.py | 1 | 30514 | """Tests for certificates views. """
import json
import ddt
from uuid import uuid4
from nose.plugins.attrib import attr
from mock import patch
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from opaque_keys.edx.locator import CourseLocator
from openedx.core.lib.tests.assertions.events import assert_event_matches
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from track.tests import EventTrackingTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from certificates.api import get_certificate_url
from certificates.models import (
ExampleCertificateSet,
ExampleCertificate,
GeneratedCertificate,
BadgeAssertion,
CertificateStatuses,
CertificateHtmlViewConfiguration,
CertificateSocialNetworks,
)
from certificates.tests.factories import (
CertificateHtmlViewConfigurationFactory,
LinkedInAddToProfileConfigurationFactory,
BadgeAssertionFactory,
)
from lms import urls
FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True
FEATURES_WITH_CERTS_DISABLED = settings.FEATURES.copy()
FEATURES_WITH_CERTS_DISABLED['CERTIFICATES_HTML_VIEW'] = False
@attr('shard_1')
@ddt.ddt
class UpdateExampleCertificateViewTest(TestCase):
"""Tests for the XQueue callback that updates example certificates. """
COURSE_KEY = CourseLocator(org='test', course='test', run='test')
DESCRIPTION = 'test'
TEMPLATE = 'test.pdf'
DOWNLOAD_URL = 'http://www.example.com'
ERROR_REASON = 'Kaboom!'
def setUp(self):
super(UpdateExampleCertificateViewTest, self).setUp()
self.cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY)
self.cert = ExampleCertificate.objects.create(
example_cert_set=self.cert_set,
description=self.DESCRIPTION,
template=self.TEMPLATE,
)
self.url = reverse('certificates.views.update_example_certificate')
# Since rate limit counts are cached, we need to clear
# this before each test.
cache.clear()
def test_update_example_certificate_success(self):
response = self._post_to_view(self.cert, download_url=self.DOWNLOAD_URL)
self._assert_response(response)
self.cert = ExampleCertificate.objects.get()
self.assertEqual(self.cert.status, ExampleCertificate.STATUS_SUCCESS)
self.assertEqual(self.cert.download_url, self.DOWNLOAD_URL)
def test_update_example_certificate_invalid_key(self):
payload = {
'xqueue_header': json.dumps({
'lms_key': 'invalid'
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid,
'url': self.DOWNLOAD_URL
})
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 404)
def test_update_example_certificate_error(self):
response = self._post_to_view(self.cert, error_reason=self.ERROR_REASON)
self._assert_response(response)
self.cert = ExampleCertificate.objects.get()
self.assertEqual(self.cert.status, ExampleCertificate.STATUS_ERROR)
self.assertEqual(self.cert.error_reason, self.ERROR_REASON)
@ddt.data('xqueue_header', 'xqueue_body')
def test_update_example_certificate_invalid_params(self, missing_param):
payload = {
'xqueue_header': json.dumps({
'lms_key': self.cert.access_key
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid,
'url': self.DOWNLOAD_URL
})
}
del payload[missing_param]
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 400)
def test_update_example_certificate_missing_download_url(self):
payload = {
'xqueue_header': json.dumps({
'lms_key': self.cert.access_key
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid
})
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 400)
    def test_update_example_certificate_non_json_param(self):
payload = {
'xqueue_header': '{/invalid',
'xqueue_body': '{/invalid'
}
response = self.client.post(self.url, data=payload)
self.assertEqual(response.status_code, 400)
def test_unsupported_http_method(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 405)
def test_bad_request_rate_limiting(self):
payload = {
'xqueue_header': json.dumps({
'lms_key': 'invalid'
}),
'xqueue_body': json.dumps({
'username': self.cert.uuid,
'url': self.DOWNLOAD_URL
})
}
# Exceed the rate limit for invalid requests
# (simulate a DDOS with invalid keys)
for _ in range(100):
response = self.client.post(self.url, data=payload)
if response.status_code == 403:
break
# The final status code should indicate that the rate
# limit was exceeded.
self.assertEqual(response.status_code, 403)
def _post_to_view(self, cert, download_url=None, error_reason=None):
"""Simulate a callback from the XQueue to the example certificate end-point. """
header = {'lms_key': cert.access_key}
body = {'username': cert.uuid}
if download_url is not None:
body['url'] = download_url
if error_reason is not None:
body['error'] = 'error'
body['error_reason'] = self.ERROR_REASON
payload = {
'xqueue_header': json.dumps(header),
'xqueue_body': json.dumps(body)
}
return self.client.post(self.url, data=payload)
def _assert_response(self, response):
"""Check the response from the callback end-point. """
content = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(content['return_code'], 0)
def fakemicrosite(name, default=None):
"""
This is a test mocking function to return a microsite configuration
"""
if name == 'microsite_config_key':
return 'test_microsite'
else:
return default
@attr('shard_1')
class MicrositeCertificatesViewsTests(ModuleStoreTestCase):
"""
Tests for the microsite certificates web/html views
"""
def setUp(self):
super(MicrositeCertificatesViewsTests, self).setUp()
self.client = Client()
self.course = CourseFactory.create(
org='testorg', number='run1', display_name='refundable course'
)
self.course_id = self.course.location.course_key
self.user = UserFactory.create(
email='[email protected]',
username='joeuser',
password='foo'
)
self.user.profile.name = "Joe User"
self.user.profile.save()
self.client.login(username=self.user.username, password='foo')
self.cert = GeneratedCertificate.objects.create(
user=self.user,
course_id=self.course_id,
verify_uuid=uuid4(),
download_uuid=uuid4(),
grade="0.95",
key='the_key',
distinction=True,
status='generated',
mode='honor',
name=self.user.profile.name,
)
def _certificate_html_view_configuration(self, configuration_string, enabled=True):
"""
This will create a certificate html configuration
"""
config = CertificateHtmlViewConfiguration(enabled=enabled, configuration=configuration_string)
config.save()
return config
def _add_course_certificates(self, count=1, signatory_count=0, is_active=True):
"""
Create certificate for the course.
"""
signatories = [
{
'name': 'Signatory_Name ' + str(i),
'title': 'Signatory_Title ' + str(i),
'organization': 'Signatory_Organization ' + str(i),
'signature_image_path': '/static/certificates/images/demo-sig{}.png'.format(i),
'id': i,
} for i in xrange(signatory_count)
]
certificates = [
{
'id': i,
'name': 'Name ' + str(i),
'description': 'Description ' + str(i),
'course_title': 'course_title_' + str(i),
'org_logo_path': '/t4x/orgX/testX/asset/org-logo-{}.png'.format(i),
'signatories': signatories,
'version': 1,
'is_active': is_active
} for i in xrange(count)
]
self.course.certificates = {'certificates': certificates}
self.course.save()
self.store.update_item(self.course, self.user.id)
@patch("microsite_configuration.microsite.get_value", fakemicrosite)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_html_view_for_microsite(self):
test_configuration_string = """{
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "edX",
"company_about_url": "http://www.edx.org/about-us",
"company_privacy_url": "http://www.edx.org/edx-privacy-policy",
"company_tos_url": "http://www.edx.org/edx-terms-service",
"company_verified_certificate_url": "http://www.edx.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-edx.svg",
"logo_url": "http://www.edx.org"
},
"test_microsite": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "platform_microsite",
"company_about_url": "http://www.microsite.org/about-us",
"company_privacy_url": "http://www.microsite.org/edx-privacy-policy",
"company_tos_url": "http://www.microsite.org/microsite-terms-service",
"company_verified_certificate_url": "http://www.microsite.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-microsite.svg",
"logo_url": "http://www.microsite.org",
"company_about_description": "This is special microsite aware company_about_description content",
"company_about_title": "Microsite title"
},
"honor": {
"certificate_type": "Honor Code",
"document_body_class_append": "is-honorcode"
}
}"""
config = self._certificate_html_view_configuration(configuration_string=test_configuration_string)
self.assertEquals(config.configuration, test_configuration_string)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
self._add_course_certificates(count=1, signatory_count=2)
response = self.client.get(test_url)
self.assertIn('platform_microsite', response.content)
self.assertIn('http://www.microsite.org', response.content)
self.assertIn('This is special microsite aware company_about_description content', response.content)
self.assertIn('Microsite title', response.content)
@patch("microsite_configuration.microsite.get_value", fakemicrosite)
def test_html_view_microsite_configuration_missing(self):
test_configuration_string = """{
"default": {
"accomplishment_class_append": "accomplishment-certificate",
"platform_name": "edX",
"company_about_url": "http://www.edx.org/about-us",
"company_privacy_url": "http://www.edx.org/edx-privacy-policy",
"company_tos_url": "http://www.edx.org/edx-terms-service",
"company_verified_certificate_url": "http://www.edx.org/verified-certificate",
"document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css",
"logo_src": "/static/certificates/images/logo-edx.svg",
"logo_url": "http://www.edx.org",
"company_about_description": "This should not survive being overwritten by static content"
},
"honor": {
"certificate_type": "Honor Code",
"document_body_class_append": "is-honorcode"
}
}"""
config = self._certificate_html_view_configuration(configuration_string=test_configuration_string)
self.assertEquals(config.configuration, test_configuration_string)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
self._add_course_certificates(count=1, signatory_count=2)
response = self.client.get(test_url)
self.assertIn('edX', response.content)
self.assertNotIn('platform_microsite', response.content)
self.assertNotIn('http://www.microsite.org', response.content)
self.assertNotIn('This should not survive being overwritten by static content', response.content)
@attr('shard_1')
class CertificatesViewsTests(ModuleStoreTestCase, EventTrackingTestCase):
"""
Tests for the certificates web/html views
"""
def setUp(self):
super(CertificatesViewsTests, self).setUp()
self.client = Client()
self.course = CourseFactory.create(
org='testorg', number='run1', display_name='refundable course'
)
self.course_id = self.course.location.course_key
self.user = UserFactory.create(
email='[email protected]',
username='joeuser',
password='foo'
)
self.user.profile.name = "Joe User"
self.user.profile.save()
self.client.login(username=self.user.username, password='foo')
self.cert = GeneratedCertificate.objects.create(
user=self.user,
course_id=self.course_id,
verify_uuid=uuid4(),
download_uuid=uuid4(),
grade="0.95",
key='the_key',
distinction=True,
status='generated',
mode='honor',
name=self.user.profile.name,
)
CourseEnrollmentFactory.create(
user=self.user,
course_id=self.course_id
)
CertificateHtmlViewConfigurationFactory.create()
LinkedInAddToProfileConfigurationFactory.create()
def _add_course_certificates(self, count=1, signatory_count=0, is_active=True):
"""
Create certificate for the course.
"""
signatories = [
{
'name': 'Signatory_Name ' + str(i),
'title': 'Signatory_Title ' + str(i),
'organization': 'Signatory_Organization ' + str(i),
'signature_image_path': '/static/certificates/images/demo-sig{}.png'.format(i),
'id': i,
} for i in xrange(0, signatory_count)
]
certificates = [
{
'id': i,
'name': 'Name ' + str(i),
'description': 'Description ' + str(i),
'course_title': 'course_title_' + str(i),
'org_logo_path': '/t4x/orgX/testX/asset/org-logo-{}.png'.format(i),
'signatories': signatories,
'version': 1,
'is_active': is_active
} for i in xrange(0, count)
]
self.course.certificates = {'certificates': certificates}
self.course.save()
self.store.update_item(self.course, self.user.id)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_valid_certificate(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=unicode(self.course.id) # pylint: disable=no-member
)
self._add_course_certificates(count=1, signatory_count=2)
response = self.client.get(test_url)
self.assertIn(str(self.cert.verify_uuid), response.content)
# Hit any "verified" mode-specific branches
self.cert.mode = 'verified'
self.cert.save()
response = self.client.get(test_url)
self.assertIn(str(self.cert.verify_uuid), response.content)
# Hit any 'xseries' mode-specific branches
self.cert.mode = 'xseries'
self.cert.save()
response = self.client.get(test_url)
self.assertIn(str(self.cert.verify_uuid), response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_with_valid_signatories(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
self._add_course_certificates(count=1, signatory_count=2)
response = self.client.get(test_url)
self.assertIn('course_title_0', response.content)
self.assertIn('/t4x/orgX/testX/asset/org-logo-0.png', response.content)
self.assertIn('Signatory_Name 0', response.content)
self.assertIn('Signatory_Title 0', response.content)
self.assertIn('Signatory_Organization 0', response.content)
self.assertIn('/static/certificates/images/demo-sig0.png', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_course_display_name_not_override_with_course_title(self):
# if certificate in descriptor has not course_title then course name should not be overridden with this title.
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
test_certificates = [
{
'id': 0,
'name': 'Name 0',
'description': 'Description 0',
'signatories': [],
'version': 1,
                'is_active': True
}
]
self.course.certificates = {'certificates': test_certificates}
self.course.save()
self.store.update_item(self.course, self.user.id)
response = self.client.get(test_url)
self.assertNotIn('test_course_title_0', response.content)
self.assertIn('refundable course', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_certificate_view_without_org_logo(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
test_certificates = [
{
'id': 0,
'name': 'Certificate Name 0',
'signatories': [],
'version': 1,
'is_active': True
}
]
self.course.certificates = {'certificates': test_certificates}
self.course.save()
self.store.update_item(self.course, self.user.id)
response = self.client.get(test_url)
# make sure response html has only one organization logo container for edX
self.assertContains(response, "<li class=\"wrapper-organization\">", 1)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_without_signatories(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
self._add_course_certificates(count=1, signatory_count=0)
response = self.client.get(test_url)
self.assertNotIn('Signatory_Name 0', response.content)
self.assertNotIn('Signatory_Title 0', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED)
def test_render_html_view_invalid_feature_flag(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
response = self.client.get(test_url)
self.assertIn('invalid', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_course_id(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id='az/23423/4vs'
)
response = self.client.get(test_url)
self.assertIn('invalid', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_course(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id='missing/course/key'
)
response = self.client.get(test_url)
self.assertIn('invalid', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_user(self):
test_url = get_certificate_url(
user_id=111,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
response = self.client.get(test_url)
self.assertIn('invalid', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_user_certificate(self):
self.cert.delete()
self.assertEqual(len(GeneratedCertificate.objects.all()), 0)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
response = self.client.get(test_url)
self.assertIn('invalid', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_with_preview_mode(self):
"""
        Test that the certificate web view renders properly, along with its
        signatories information, when accessed in preview mode, whether or not
        the certificate is marked active.
"""
self.cert.delete()
self.assertEqual(len(GeneratedCertificate.objects.all()), 0)
self._add_course_certificates(count=1, signatory_count=2)
test_url = get_certificate_url(
user_id=self.user.id,
course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member
)
response = self.client.get(test_url + '?preview=honor')
self.assertNotIn(self.course.display_name, response.content)
self.assertIn('course_title_0', response.content)
self.assertIn('Signatory_Title 0', response.content)
# mark certificate inactive but accessing in preview mode.
self._add_course_certificates(count=1, signatory_count=2, is_active=False)
response = self.client.get(test_url + '?preview=honor')
self.assertNotIn(self.course.display_name, response.content)
self.assertIn('course_title_0', response.content)
self.assertIn('Signatory_Title 0', response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_render_html_view_invalid_certificate_configuration(self):
test_url = get_certificate_url(
user_id=self.user.id,
course_id=unicode(self.course.id)
)
response = self.client.get(test_url)
self.assertIn("Invalid Certificate", response.content)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_certificate_evidence_event_emitted(self):
self.client.logout()
self._add_course_certificates(count=1, signatory_count=2)
self.recreate_tracker()
test_url = get_certificate_url(
user_id=self.user.id,
course_id=unicode(self.course.id)
)
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
actual_event = self.get_event()
self.assertEqual(actual_event['name'], 'edx.certificate.evidence_visited')
assert_event_matches(
{
'user_id': self.user.id,
'certificate_id': unicode(self.cert.verify_uuid),
'enrollment_mode': self.cert.mode,
'certificate_url': test_url,
'course_id': unicode(self.course.id),
'social_network': CertificateSocialNetworks.linkedin
},
actual_event['data']
)
@override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED)
def test_evidence_event_sent(self):
test_url = get_certificate_url(user_id=self.user.id, course_id=self.course_id) + '?evidence_visit=1'
self.recreate_tracker()
assertion = BadgeAssertion(
user=self.user, course_id=self.course_id, mode='honor',
data={
'image': 'http://www.example.com/image.png',
'json': {'id': 'http://www.example.com/assertion.json'},
'issuer': 'http://www.example.com/issuer.json',
}
)
assertion.save()
response = self.client.get(test_url)
self.assertEqual(response.status_code, 200)
assert_event_matches(
{
'name': 'edx.badge.assertion.evidence_visited',
'data': {
'course_id': 'testorg/run1/refundable_course',
# pylint: disable=no-member
'assertion_id': assertion.id,
'assertion_json_url': 'http://www.example.com/assertion.json',
'assertion_image_url': 'http://www.example.com/image.png',
'user_id': self.user.id,
'issuer': 'http://www.example.com/issuer.json',
'enrollment_mode': 'honor',
},
},
self.get_event()
)
@override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED)
def test_request_certificate_without_passing(self):
self.cert.status = CertificateStatuses.unavailable
self.cert.save()
request_certificate_url = reverse('certificates.views.request_certificate')
response = self.client.post(request_certificate_url, {'course_id': unicode(self.course.id)})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(CertificateStatuses.notpassing, response_json['add_status'])
@override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED)
@override_settings(CERT_QUEUE='test-queue')
def test_request_certificate_after_passing(self):
self.cert.status = CertificateStatuses.unavailable
self.cert.save()
request_certificate_url = reverse('certificates.views.request_certificate')
with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue:
mock_queue.return_value = (0, "Successfully queued")
with patch('courseware.grades.grade') as mock_grade:
mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75}
response = self.client.post(request_certificate_url, {'course_id': unicode(self.course.id)})
self.assertEqual(response.status_code, 200)
response_json = json.loads(response.content)
self.assertEqual(CertificateStatuses.generating, response_json['add_status'])
class TrackShareRedirectTest(ModuleStoreTestCase, EventTrackingTestCase):
"""
Verifies the badge image share event is sent out.
"""
def setUp(self):
super(TrackShareRedirectTest, self).setUp()
self.client = Client()
self.course = CourseFactory.create(
org='testorg', number='run1', display_name='trackable course'
)
self.assertion = BadgeAssertionFactory(
user=self.user, course_id=self.course.id, data={
'image': 'http://www.example.com/image.png',
'json': {'id': 'http://www.example.com/assertion.json'},
'issuer': 'http://www.example.com/issuer.json',
},
)
# Enabling the feature flag isn't enough to change the URLs-- they're already loaded by this point.
self.old_patterns = urls.urlpatterns
urls.urlpatterns += (urls.BADGE_SHARE_TRACKER_URL,)
def tearDown(self):
super(TrackShareRedirectTest, self).tearDown()
urls.urlpatterns = self.old_patterns
def test_social_event_sent(self):
test_url = '/certificates/badge_share_tracker/{}/social_network/{}/'.format(
unicode(self.course.id),
self.user.username,
)
self.recreate_tracker()
response = self.client.get(test_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], 'http://www.example.com/image.png')
assert_event_matches(
{
'name': 'edx.badge.assertion.shared',
'data': {
'course_id': 'testorg/run1/trackable_course',
'social_network': 'social_network',
# pylint: disable=no-member
'assertion_id': self.assertion.id,
'assertion_json_url': 'http://www.example.com/assertion.json',
'assertion_image_url': 'http://www.example.com/image.png',
'user_id': self.user.id,
'issuer': 'http://www.example.com/issuer.json',
'enrollment_mode': 'honor',
},
},
self.get_event()
)
| agpl-3.0 | 988,242,617,299,130,100 | 40.459239 | 118 | 0.606967 | false |
saintleva/limited-apt | tools/obtain-priorities/parsing.py | 1 | 2444 | #!/usr/bin/env python3
#
# Copyright (C) Anton Liaukevich 2011-2020 <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import chardet
from limitedapt.debconf import *
def parse_shell(handle):
has_questions = False
maximal_priority = Priority.LOW
for line in handle:
string = line.lstrip()
if string.startswith("db_input"):
try:
current_priority = Priority.from_string(string.split()[1])
if current_priority > maximal_priority:
maximal_priority = current_priority
has_questions = True
except PriorityConvertingFromStringError:
return PackageState(Status.PROCESSING_ERROR)
return PackageState(Status.HAS_QUESTIONS, maximal_priority) if has_questions else PackageState(Status.HAS_NOT_QUESTIONS)
shell_parser_map = {
"/bin/sh" : parse_shell,
"/bin/bash": parse_shell
}
def process_package(package, control_dir, script_types_fh):
path_to_config_file = os.path.join(control_dir, "config")
if os.path.exists(path_to_config_file):
rawdata = open(path_to_config_file, "rb").read()
encoding = chardet.detect(rawdata)["encoding"]
handle = open(path_to_config_file, "r", encoding=encoding)
shebang_string = handle.readline()
if shebang_string[0:2] != "#!":
return PackageState(Status.PROCESSING_ERROR)
parametrized_shell = shebang_string[2:].strip()
shell = parametrized_shell.split()[0]
print(str(package), encoding, shell, file=script_types_fh, flush=True)
if shell in shell_parser_map:
return shell_parser_map[shell](handle)
else:
return PackageState(Status.PROCESSING_ERROR)
else:
return PackageState(Status.NO_CONFIG_FILE)
| gpl-3.0 | 8,631,690,937,187,985,000 | 37.793651 | 124 | 0.677169 | false |
openstack/heat | heat/tests/openstack/neutron/test_sfc/test_port_chain.py | 1 | 6568 | #
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from heat.engine.clients.os import neutron
from heat.engine.resources.openstack.neutron.sfc import port_chain
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
port_chain_template = {
'heat_template_version': '2015-04-30',
'resources': {
'test_resource': {
'type': 'OS::Neutron::PortChain',
'properties': {
'name': 'test_port_chain',
'description': 'port_chain_desc',
'port_pair_groups': ['port_pair_group_1'],
'flow_classifiers': ['flow_classifier1'],
'chain_parameters': {"correlation": 'mpls'}
}
}
}
}
class PortChainTest(common.HeatTestCase):
def setUp(self):
super(PortChainTest, self).setUp()
self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
return_value=True)
self.ctx = utils.dummy_context()
self.stack = stack.Stack(
self.ctx, 'test_stack',
template.Template(port_chain_template)
)
self.test_resource = self.stack['test_resource']
self.test_client_plugin = mock.MagicMock()
self.test_resource.client_plugin = mock.MagicMock(
return_value=self.test_client_plugin)
self.test_client = mock.MagicMock()
self.test_resource.client = mock.MagicMock(
return_value=self.test_client)
self.test_client_plugin.get_notification = mock.MagicMock(
return_value='sample_notification')
self.patchobject(self.test_client_plugin, 'resolve_ext_resource'
).return_value = ('port_pair_group_1')
self.patchobject(self.test_client_plugin, 'resolve_ext_resource'
).return_value = ('flow_classifier1')
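        # NOTE: both patches above replace the same method, so the second
        # return_value ('flow_classifier1') is the one in effect.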
def test_resource_mapping(self):
mapping = port_chain.resource_mapping()
self.assertEqual(port_chain.PortChain,
mapping['OS::Neutron::PortChain'])
def _get_mock_resource(self):
value = mock.MagicMock()
value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
return value
def test_resource_handle_create(self):
mock_pc_create = self.test_client_plugin.create_ext_resource
mock_resource = self._get_mock_resource()
mock_pc_create.return_value = mock_resource
# validate the properties
self.assertEqual(
'test_port_chain',
self.test_resource.properties.get(
port_chain.PortChain.NAME))
self.assertEqual(
'port_chain_desc',
self.test_resource.properties.get(
port_chain.PortChain.DESCRIPTION))
self.assertEqual(
['port_pair_group_1'],
self.test_resource.properties.get(
port_chain.PortChain.PORT_PAIR_GROUPS))
self.assertEqual(
['flow_classifier1'],
self.test_resource.properties.get(
port_chain.PortChain.FLOW_CLASSIFIERS))
self.assertEqual(
{"correlation": 'mpls'},
self.test_resource.properties.get(
port_chain.PortChain.CHAIN_PARAMETERS))
self.test_resource.data_set = mock.Mock()
self.test_resource.handle_create()
mock_pc_create.assert_called_once_with(
'port_chain',
{
'name': 'test_port_chain',
'description': 'port_chain_desc',
'port_pair_groups': ['port_pair_group_1'],
'flow_classifiers': ['flow_classifier1'],
'chain_parameters': {"correlation": 'mpls'}}
)
    def test_delete_portchain(self):
mock_pc_delete = self.test_client_plugin.delete_ext_resource
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
mock_pc_delete.return_value = None
self.assertIsNone(self.test_resource.handle_delete())
mock_pc_delete.assert_called_once_with(
'port_chain', self.test_resource.resource_id)
    def test_delete_portchain_resource_id_is_none(self):
self.test_resource.resource_id = None
self.assertIsNone(self.test_resource.handle_delete())
self.assertEqual(0, self.test_client_plugin.
delete_ext_resource.call_count)
def test_resource_handle_delete_not_found(self):
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
mock_pc_delete = self.test_client_plugin.delete_ext_resource
mock_pc_delete.side_effect = self.test_client_plugin.NotFound
self.assertIsNone(self.test_resource.handle_delete())
def test_resource_show_resource(self):
mock_pc_get = self.test_client_plugin.show_ext_resource
mock_pc_get.return_value = None
self.assertIsNone(self.test_resource._show_resource(),
'Failed to show resource')
def test_resource_handle_update(self):
mock_ppg_patch = self.test_client_plugin.update_ext_resource
self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
prop_diff = {
'name': 'name-updated',
'description': 'description-updated',
'port_pair_groups': ['port_pair_group_2'],
'flow_classifiers': ['flow_classifier2'],
}
self.test_resource.handle_update(json_snippet=None,
tmpl_diff=None,
prop_diff=prop_diff)
mock_ppg_patch.assert_called_once_with(
'port_chain',
{
'name': 'name-updated',
'description': 'description-updated',
'port_pair_groups': ['port_pair_group_2'],
'flow_classifiers': ['flow_classifier2'],
}, self.test_resource.resource_id)
| apache-2.0 | 103,200,701,343,941,060 | 37.409357 | 79 | 0.600792 | false |
matrumz/RPi_Custom_Files | Printing/hplip-3.15.2/ui4/plugindiagnose.py | 1 | 3164 | # -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2011 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Amarnath Chitumalla
#
# Local
from base.g import *
from base import device, utils, pkit
from prnt import cups
from base.codes import *
from .ui_utils import *
from installer import pluginhandler
from base.sixext import to_unicode
# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import signal
# Ui
from .plugindiagnose_base import Ui_Dialog
class PluginDiagnose(QDialog, Ui_Dialog):
def __init__(self, parent, install_mode=PLUGIN_NONE, plugin_reason=PLUGIN_REASON_NONE, upgrade=False):
QDialog.__init__(self, parent)
self.install_mode = install_mode
self.plugin_reason = plugin_reason
self.plugin_path = None
self.result = False
self.pluginObj = pluginhandler.PluginHandle()
self.setupUi(self, upgrade)
self.user_settings = UserSettings()
self.user_settings.load()
self.user_settings.debug()
self.initUi()
def initUi(self):
# connect signals/slots
self.connect(self.CancelButton, SIGNAL("clicked()"), self.CancelButton_clicked)
self.connect(self.NextButton, SIGNAL("clicked()"), self.NextButton_clicked)
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Application icon
self.setWindowIcon(QIcon(load_pixmap('hp_logo', '128x128')))
def PathLineEdit_textChanged(self, t):
self.plugin_path = to_unicode(t)
self.setPathIndicators()
#
# Misc
#
def displayPage(self, page):
self.updateStepText(page)
self.StackedWidget.setCurrentIndex(page)
def CancelButton_clicked(self):
self.close()
def NextButton_clicked(self):
self.NextButton.setEnabled(False)
self.CancelButton.setEnabled(False)
try:
plugin = PLUGIN_REQUIRED
plugin_reason = PLUGIN_REASON_NONE
ok, sudo_ok = pkit.run_plugin_command(plugin == PLUGIN_REQUIRED, plugin_reason)
if not ok or self.pluginObj.getStatus() != pluginhandler.PLUGIN_INSTALLED:
FailureUI(self, self.__tr("Failed to install Plug-in.\nEither you have chosen to skip the Plug-in installation or entered incorrect Password."))
finally:
endWaitCursor()
self.result = True
self.close()
    def __tr(self, s, c=None):
        return qApp.translate("PluginDialog", s, c)
| gpl-2.0 | 7,540,046,640,401,814,000 | 28.570093 | 161 | 0.677623 | false |
gradiuscypher/internet_illithid | mirror_shield/endpoints/filestore.py | 1 | 2550 | import traceback
import requests
import time
import imghdr
from os.path import exists, isfile, join, isdir
from os import makedirs, listdir
from flask import Blueprint, request, send_from_directory, render_template
filestore = Blueprint('callback', __name__)
@filestore.route('/clone', methods=["POST"])
def clone():
try:
# Grab the JSON content in post
content = request.get_json()
url = content['url']
url_filename = url.split("/")[-1]
sender = content['sender']
source = content['source']
timestamp = int(time.time())
filename = "files/{}/{}/{}-{}".format(source, sender, timestamp, url_filename)
# Check if the user's folder exists
if not exists("files/{}/{}".format(source, sender)):
makedirs("files/{}/{}".format(source, sender))
# Download the file and save to the user's directory
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(filename, 'wb') as f:
for chunk in r:
f.write(chunk)
return filename, 200
    except Exception:
print(traceback.format_exc())
return "Fail", 500
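# Usage sketch for the /clone endpoint above (hypothetical host and payload
# values; the final URL also depends on the prefix the app registers this
# blueprint under):
#     import requests
#     requests.post("http://localhost:5000/clone", json={
#         "url": "http://example.com/pic.png", "sender": "alice", "source": "irc"})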
@filestore.route('/files/<path:path>', methods=["GET"])
def files(path):
return send_from_directory('files', path)
@filestore.route('/src/<path:path>', methods=["GET"])
def src(path):
print("PATH IS {}".format(path))
return send_from_directory('src', path)
@filestore.route('/', methods=["GET"])
def services():
services = []
filepath = "files/"
for f in listdir(filepath):
if isdir(join(filepath, f)):
services.append(f)
return render_template('servicelist.html', services=services)
@filestore.route('/<service>/userlist', methods=["GET"])
def userlist(service):
users = []
filepath = "files/{}".format(service)
for f in listdir(filepath):
if isdir(join(filepath, f)):
users.append(f)
return render_template('userlist.html', users=users, service=service)
@filestore.route('/<service>/gallery/<user>', methods=["GET"])
def gallery(user, service):
filepath = "files/{}/{}".format(service, user)
images = []
other = []
for f in listdir(filepath):
if isfile(join(filepath, f)):
if imghdr.what(join(filepath, f)) is not None:
images.append(f)
else:
other.append(f)
return render_template('gallery.html', title="Gallery", images=images, filepath=filepath, otherfiles=other)
| mit | -8,529,620,719,959,950,000 | 27.977273 | 111 | 0.606667 | false |
sergeysynergy/graph | django/app/urls.py | 1 | 2642 | # -*- coding: utf-8 -*-
from django.conf.urls import include, url
from django.contrib import admin
import app.settings as settings
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
]
urlpatterns += [
url(r'^static/(?P<path>.*)$', 'django.views.static.serve',{'document_root': settings.URL_STATIC_ROOT}),
#
#
    # view pages that have templates in the templates directory
url(r'^$', 'zcore.views.index', name='index'),
url(r'^force-d3/(?P<id>[-\w]+)/(?P<graphFilter>.*)/(?P<nodesList>.*)/(?P<color>.*)/$', 'zcore.views.view_force_d3', name='viewForceD3'),
url(r'^force-react/(?P<id>[-\w]+)/(?P<graphFilter>.*)/$', 'zcore.views.view_force_react', name='viewForceReact'),
url(r'^chord/(?P<id>[-\w]+)/$', 'zcore.views.view_chord', name='viewChord'),
url(r'^timeline/(?P<id>[-\w]+)/$', 'zcore.views.view_timeline', name='viewTimeline'),
url(r'^new-project/$', 'zcore.views.view_new_project', name='viewNewProject'),
url(r'^graph/(?P<id>[-\w]+)/$', 'zcore.views.view_graph', name='viewGraph'),
url(r'^map/(?P<gid>[-\w]+)/(?P<nid>[-\w]+)/$', 'zcore.views.view_map', name='viewMap'),
url(r'^create-project/(?P<graphFilter>.*)/$', 'zcore.views.create_project'),
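    # Example dispatch for the force-d3 pattern above (hypothetical values):
    # /force-d3/42/all/n1,n2/blue/ resolves to zcore.views.view_force_d3 with
    # id='42', graphFilter='all', nodesList='n1,n2', color='blue'.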
    # /view pages
#
#
#
#
    # JSON data
url(r'^json-heap-info/$', 'zcore.serializers.heap_info', name='heapInfo'),
url(r'^json-force-react/(?P<id>[-\w]+)/(?P<gfilter>.*)/$', 'zcore.serializers.json_force_react', name='jsonForceReact'),
#url(r'^json-force-react/(?P<id>[-\w]+)/(?P<graphFilter>.*)/$', 'zcore.views.json_force_react', name='jsonForceReact'),
url(r'^json-force-d3/(?P<id>[-\w]+)/(?P<graphFilter>.*)/(?P<nodesList>.*)/(?P<color>.*)/$', 'zcore.serializers.json_force_d3', name='jsonForced3'),
url(r'^json-chord/(?P<id>[-\w]+)/(?P<gfilter>.*)/$', 'zcore.serializers.json_chord', name='jsonChord'),
url(r'^json-timeline/(?P<id>[-\w]+)/(?P<gfilter>.*)/$', 'zcore.serializers.json_timeline', name='jsonTimeline'),
url(r'^json-main-graph/(?P<id>[-\w]+)/$', 'zcore.serializers.json_main_graph', name='jsonMainGraph'),
url(r'^json-main-graph/(?P<id>[-\w]+)/(?P<gfilter>.*)/$', 'zcore.serializers.json_main_graph', name='jsonMainGraph'),
url(r'^json-transfers/(?P<gid>[-\w]+)/(?P<nid>[-\w]+)/$', 'zcore.serializers.json_transfers', name='jsonTransfers'),
    # reference dictionary output
url(r'^json-attributes/$', 'zcore.serializers.json_attributes', name='jsonAttributes'),
url(r'^json-taxonomy/$', 'zcore.serializers.json_taxonomy', name='jsonTaxonomy'),
    # /JSON data
#
#
]
| mit | -8,054,665,147,103,789,000 | 47.603774 | 151 | 0.60559 | false |
chetan51/nupic.research | sensorimotor/tests/unit/one_d_universe_test.py | 1 | 2335 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import unittest2 as unittest
from sensorimotor.one_d_universe import OneDUniverse
class OneDUniverseTest(unittest.TestCase):
def testEncodeSensorValue(self):
universe = OneDUniverse(debugSensor=True,
nSensor=105, wSensor=5, nMotor=105, wMotor=5)
self.assertEqual(universe.encodeSensorValue(0), set(xrange(0, 5)))
self.assertEqual(universe.encodeSensorValue(19), set(xrange(95, 100)))
self.assertEqual(universe.encodeSensorValue(20), set(xrange(100, 105)))
universe = OneDUniverse(debugSensor=False,
nSensor=105, wSensor=5, nMotor=105, wMotor=5)
self.assertNotEqual(universe.encodeSensorValue(0), set(xrange(0, 5)))
def testEncodeMotorValue(self):
universe = OneDUniverse(debugMotor=True,
nSensor=105, wSensor=5, nMotor=48*21, wMotor=48)
self.assertEqual(universe.encodeMotorValue(-10), set(xrange(0, 48)))
self.assertEqual(universe.encodeMotorValue(0), set(xrange(480, 528)))
self.assertEqual(universe.encodeMotorValue(10), set(xrange(960, 1008)))
universe = OneDUniverse(debugMotor=False,
nSensor=105, wSensor=5, nMotor=48*21, wMotor=48)
self.assertNotEqual(universe.encodeMotorValue(-10), set(xrange(0, 48)))
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 1,721,112,332,134,884,000 | 39.258621 | 76 | 0.668094 | false |
ucfopen/canvasapi | canvasapi/course.py | 1 | 94642 | import warnings
from canvasapi.assignment import Assignment, AssignmentGroup
from canvasapi.blueprint import BlueprintSubscription
from canvasapi.canvas_object import CanvasObject
from canvasapi.collaboration import Collaboration
from canvasapi.course_epub_export import CourseEpubExport
from canvasapi.custom_gradebook_columns import CustomGradebookColumn
from canvasapi.discussion_topic import DiscussionTopic
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.feature import Feature, FeatureFlag
from canvasapi.folder import Folder
from canvasapi.gradebook_history import (
Day,
Grader,
SubmissionHistory,
SubmissionVersion,
)
from canvasapi.grading_period import GradingPeriod
from canvasapi.grading_standard import GradingStandard
from canvasapi.license import License
from canvasapi.outcome_import import OutcomeImport
from canvasapi.page import Page
from canvasapi.paginated_list import PaginatedList
from canvasapi.progress import Progress
from canvasapi.quiz import QuizExtension
from canvasapi.rubric import Rubric, RubricAssociation
from canvasapi.submission import GroupedSubmission, Submission
from canvasapi.tab import Tab
from canvasapi.todo import Todo
from canvasapi.upload import FileOrPathLike, Uploader
from canvasapi.usage_rights import UsageRights
from canvasapi.util import (
combine_kwargs,
file_or_path,
is_multivalued,
normalize_bool,
obj_or_id,
obj_or_str,
)
class Course(CanvasObject):
def __str__(self):
return "{} {} ({})".format(self.course_code, self.name, self.id)
def add_grading_standards(self, title, grading_scheme_entry, **kwargs):
"""
Create a new grading standard for the course.
:calls: `POST /api/v1/courses/:course_id/grading_standards \
<https://canvas.instructure.com/doc/api/grading_standards.html#method.grading_standards_api.create>`_
:param title: The title for the Grading Standard
:type title: str
        :param grading_scheme_entry: A list of dictionaries containing keys for "name" and "value"
        :type grading_scheme_entry: list of dict
:rtype: :class:`canvasapi.grading_standards.GradingStandard`
"""
if not isinstance(grading_scheme_entry, list) or len(grading_scheme_entry) <= 0:
raise ValueError("Param `grading_scheme_entry` must be a non-empty list.")
for entry in grading_scheme_entry:
if not isinstance(entry, dict):
raise ValueError("grading_scheme_entry must consist of dictionaries.")
if "name" not in entry or "value" not in entry:
raise ValueError(
"Dictionaries with keys 'name' and 'value' are required."
)
kwargs["grading_scheme_entry"] = grading_scheme_entry
response = self._requester.request(
"POST",
"courses/%s/grading_standards" % (self.id),
title=title,
_kwargs=combine_kwargs(**kwargs),
)
return GradingStandard(self._requester, response.json())
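    # Usage sketch (hypothetical ``course`` instance; entry values are
    # illustrative):
    #     scheme = [{"name": "A", "value": 0.9}, {"name": "B", "value": 0.8}]
    #     standard = course.add_grading_standards("Letter Grades", scheme)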
def column_data_bulk_update(self, column_data, **kwargs):
"""
Set the content of custom columns.
:calls: `PUT /api/v1/courses/:course_id/custom_gradebook_column_data \
<https://canvas.instructure.com/doc/api/custom_gradebook_columns.html#method.custom_gradebook_column_data_api.bulk_update>`_
:param column_data: Content to put into the column
:type column_data: list
:rtype: :class:`canvasapi.progress.Progress`
"""
kwargs["column_data"] = column_data
response = self._requester.request(
"PUT",
"courses/{}/custom_gradebook_column_data".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Progress(self._requester, response.json())
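    # Usage sketch (hypothetical IDs; the entry keys column_id, user_id and
    # content follow the Canvas docs and are an assumption here):
    #     progress = course.column_data_bulk_update(
    #         [{"column_id": 1, "user_id": 2, "content": "Needs follow-up"}])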
def conclude(self, **kwargs):
"""
Mark this course as concluded.
:calls: `DELETE /api/v1/courses/:id \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.destroy>`_
:returns: True if the course was concluded, False otherwise.
:rtype: bool
"""
kwargs["event"] = "conclude"
response = self._requester.request(
"DELETE",
"courses/{}".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json().get("conclude")
def create_assignment(self, assignment, **kwargs):
"""
Create a new assignment for this course.
Note: The assignment is created in the active state.
:calls: `POST /api/v1/courses/:course_id/assignments \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.create>`_
:param assignment: The attributes of the assignment
:type assignment: dict
:rtype: :class:`canvasapi.assignment.Assignment`
"""
from canvasapi.assignment import Assignment
if isinstance(assignment, dict) and "name" in assignment:
kwargs["assignment"] = assignment
else:
raise RequiredFieldMissing("Dictionary with key 'name' is required.")
response = self._requester.request(
"POST",
"courses/{}/assignments".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Assignment(self._requester, response.json())
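    # Usage sketch (hypothetical ``course`` instance; only "name" is required,
    # other attributes such as points_possible are optional):
    #     assignment = course.create_assignment(
    #         {"name": "Essay 1", "points_possible": 10})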
def create_assignment_group(self, **kwargs):
"""
Create a new assignment group for this course.
:calls: `POST /api/v1/courses/:course_id/assignment_groups \
<https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.create>`_
:rtype: :class:`canvasapi.assignment.AssignmentGroup`
"""
from canvasapi.assignment import AssignmentGroup
response = self._requester.request(
"POST",
"courses/{}/assignment_groups".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return AssignmentGroup(self._requester, response_json)
def create_assignment_overrides(self, assignment_overrides, **kwargs):
"""
Create the specified overrides for each assignment.
:calls: `POST /api/v1/courses/:course_id/assignments/overrides \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.batch_create>`_
:param assignment_overrides: Attributes for the new assignment overrides.
:type assignment_overrides: list
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.AssignmentOverride`
"""
from canvasapi.assignment import AssignmentOverride
kwargs["assignment_overrides"] = assignment_overrides
return PaginatedList(
AssignmentOverride,
self._requester,
"POST",
"courses/{}/assignments/overrides".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
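    # Usage sketch (hypothetical IDs; the entry keys follow the Canvas docs
    # and are an assumption here):
    #     overrides = course.create_assignment_overrides(
    #         [{"assignment_id": 1, "student_ids": [2, 3], "title": "Group A"}])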
def create_content_migration(self, migration_type, **kwargs):
"""
Create a content migration.
:calls: `POST /api/v1/courses/:course_id/content_migrations \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.create>`_
:param migration_type: The migrator type to use in this migration
:type migration_type: str or :class:`canvasapi.content_migration.Migrator`
:rtype: :class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration, Migrator
if isinstance(migration_type, Migrator):
kwargs["migration_type"] = migration_type.type
elif isinstance(migration_type, str):
kwargs["migration_type"] = migration_type
else:
raise TypeError("Parameter migration_type must be of type Migrator or str")
response = self._requester.request(
"POST",
"courses/{}/content_migrations".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return ContentMigration(self._requester, response_json)
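    # Usage sketch (hypothetical source course id; "course_copy_importer" is
    # one of the standard Canvas migrator types):
    #     migration = course.create_content_migration(
    #         "course_copy_importer", settings={"source_course_id": 123})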
def create_course_section(self, **kwargs):
"""
Create a new section for this course.
:calls: `POST /api/v1/courses/:course_id/sections \
<https://canvas.instructure.com/doc/api/sections.html#method.sections.create>`_
        :rtype: :class:`canvasapi.section.Section`
"""
from canvasapi.section import Section
response = self._requester.request(
"POST",
"courses/{}/sections".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Section(self._requester, response.json())
def create_custom_column(self, column, **kwargs):
"""
Create a custom gradebook column.
:calls: `POST /api/v1/courses/:course_id/custom_gradebook_columns \
<https://canvas.instructure.com/doc/api/custom_gradebook_columns.html#method.custom_gradebook_columns_api.create>`_
:param column: A dictionary representing the Custom Gradebook Column to create
:type column: dict
:rtype: :class:`canvasapi.custom_gradebook_columns.CustomGradebookColumn`
"""
if isinstance(column, dict) and "title" in column:
kwargs["column"] = column
else:
raise RequiredFieldMissing("Dictionary with key 'title' is required.")
response = self._requester.request(
"POST",
"courses/{}/custom_gradebook_columns".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
column_json = response.json()
column_json.update({"course_id": self.id})
return CustomGradebookColumn(self._requester, column_json)
def create_discussion_topic(self, **kwargs):
"""
Creates a new discussion topic for the course or group.
:calls: `POST /api/v1/courses/:course_id/discussion_topics \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.create>`_
:rtype: :class:`canvasapi.discussion_topic.DiscussionTopic`
"""
response = self._requester.request(
"POST",
"courses/{}/discussion_topics".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return DiscussionTopic(self._requester, response_json)
def create_epub_export(self, **kwargs):
"""
Create an ePub export for a course.
        :calls: `POST /api/v1/courses/:course_id/epub_exports\
<https://canvas.instructure.com/doc/api/e_pub_exports.html#method.epub_exports.create>`_
:rtype: :class:`canvasapi.course_epub_export.CourseEpubExport`
"""
response = self._requester.request(
"POST",
"courses/{}/epub_exports/".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return CourseEpubExport(self._requester, response.json())
def create_external_feed(self, url, **kwargs):
"""
Create a new external feed for the course.
:calls: `POST /api/v1/courses/:course_id/external_feeds \
<https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.create>`_
:param url: The url of the external rss or atom feed
:type url: str
:rtype: :class:`canvasapi.external_feed.ExternalFeed`
"""
from canvasapi.external_feed import ExternalFeed
response = self._requester.request(
"POST",
"courses/{}/external_feeds".format(self.id),
url=url,
_kwargs=combine_kwargs(**kwargs),
)
return ExternalFeed(self._requester, response.json())
def create_external_tool(self, **kwargs):
"""
Create an external tool in the current course.
:calls: `POST /api/v1/courses/:course_id/external_tools \
<https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create>`_
:param name: The name of the tool
:type name: str
:rtype: :class:`canvasapi.external_tool.ExternalTool`
"""
from canvasapi.external_tool import ExternalTool
required_params = ("name", "privacy_level", "consumer_key", "shared_secret")
if "client_id" not in kwargs and not all(x in kwargs for x in required_params):
raise RequiredFieldMissing(
"Must pass either `client_id` parameter or "
"`name`, `privacy_level`, `consumer_key`, and `shared_secret` parameters."
)
response = self._requester.request(
"POST",
"courses/{}/external_tools".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return ExternalTool(self._requester, response_json)
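    # Usage sketch (placeholder credentials; per the check above, pass either
    # client_id or all of name/privacy_level/consumer_key/shared_secret):
    #     tool = course.create_external_tool(
    #         name="My Tool", privacy_level="public",
    #         consumer_key="key", shared_secret="secret")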
def create_folder(self, name, **kwargs):
"""
Creates a folder in this course.
:calls: `POST /api/v1/courses/:course_id/folders \
<https://canvas.instructure.com/doc/api/files.html#method.folders.create>`_
:param name: The name of the folder.
:type name: str
:rtype: :class:`canvasapi.folder.Folder`
"""
response = self._requester.request(
"POST",
"courses/{}/folders".format(self.id),
name=name,
_kwargs=combine_kwargs(**kwargs),
)
return Folder(self._requester, response.json())
def create_group_category(self, name, **kwargs):
"""
Create a group category.
:calls: `POST /api/v1/courses/:course_id/group_categories \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.create>`_
:param name: Name of the category.
:type name: str
:rtype: :class:`canvasapi.group.GroupCategory`
"""
from canvasapi.group import GroupCategory
response = self._requester.request(
"POST",
"courses/{}/group_categories".format(self.id),
name=name,
_kwargs=combine_kwargs(**kwargs),
)
return GroupCategory(self._requester, response.json())
def create_late_policy(self, **kwargs):
"""
Create a late policy. If the course already has a late policy, a bad_request
is returned since there can only be one late policy per course.
:calls: `POST /api/v1/courses/:id/late_policy \
<https://canvas.instructure.com/doc/api/late_policy.html#method.late_policy.create>`_
:rtype: :class:`canvasapi.course.LatePolicy`
"""
response = self._requester.request(
"POST",
"courses/{}/late_policy".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
late_policy_json = response.json()
return LatePolicy(self._requester, late_policy_json["late_policy"])
def create_module(self, module, **kwargs):
"""
Create a new module.
:calls: `POST /api/v1/courses/:course_id/modules \
<https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.create>`_
:param module: The attributes for the module.
:type module: dict
:returns: The created module.
:rtype: :class:`canvasapi.module.Module`
"""
from canvasapi.module import Module
if isinstance(module, dict) and "name" in module:
kwargs["module"] = module
else:
raise RequiredFieldMissing("Dictionary with key 'name' is required.")
response = self._requester.request(
"POST",
"courses/{}/modules".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
module_json = response.json()
module_json.update({"course_id": self.id})
return Module(self._requester, module_json)
def create_page(self, wiki_page, **kwargs):
"""
Create a new wiki page.
:calls: `POST /api/v1/courses/:course_id/pages \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.create>`_
:param wiki_page: The title for the page.
:type wiki_page: dict
:returns: The created page.
:rtype: :class:`canvasapi.page.Page`
"""
if isinstance(wiki_page, dict) and "title" in wiki_page:
kwargs["wiki_page"] = wiki_page
else:
raise RequiredFieldMissing("Dictionary with key 'title' is required.")
response = self._requester.request(
"POST", "courses/{}/pages".format(self.id), _kwargs=combine_kwargs(**kwargs)
)
page_json = response.json()
page_json.update({"course_id": self.id})
return Page(self._requester, page_json)
def create_quiz(self, quiz, **kwargs):
"""
Create a new quiz in this course.
:calls: `POST /api/v1/courses/:course_id/quizzes \
<https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.create>`_
:param quiz: The attributes for the quiz.
:type quiz: dict
:rtype: :class:`canvasapi.quiz.Quiz`
"""
from canvasapi.quiz import Quiz
if isinstance(quiz, dict) and "title" in quiz:
kwargs["quiz"] = quiz
else:
raise RequiredFieldMissing("Dictionary with key 'title' is required.")
response = self._requester.request(
"POST",
"courses/{}/quizzes".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
quiz_json = response.json()
quiz_json.update({"course_id": self.id})
return Quiz(self._requester, quiz_json)
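    # Usage sketch (hypothetical ``course`` instance; "assignment" is one of
    # the Canvas quiz_type values):
    #     quiz = course.create_quiz(
    #         {"title": "Week 1 Quiz", "quiz_type": "assignment"})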
def create_rubric(self, **kwargs):
"""
Create a new rubric.
:calls: `POST /api/v1/courses/:course_id/rubrics \
<https://canvas.instructure.com/doc/api/rubrics.html#method.rubrics.create>`_
:returns: Returns a dictionary with rubric and rubric association.
:rtype: `dict`
"""
response = self._requester.request(
"POST",
"courses/{}/rubrics".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
dictionary = response.json()
rubric_dict = {}
if "rubric" in dictionary:
r_dict = dictionary["rubric"]
rubric = Rubric(self._requester, r_dict)
rubric_dict = {"rubric": rubric}
if "rubric_association" in dictionary:
ra_dict = dictionary["rubric_association"]
rubric_association = RubricAssociation(self._requester, ra_dict)
rubric_dict.update({"rubric_association": rubric_association})
return rubric_dict
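    # Usage sketch (illustrative payload; the returned dict is unpacked as the
    # method above shows):
    #     result = course.create_rubric(rubric={"title": "Essay Rubric"})
    #     rubric = result["rubric"]  # a canvasapi.rubric.Rubric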
def create_rubric_association(self, **kwargs):
"""
Create a new RubricAssociation.
:calls: `POST /api/v1/courses/:course_id/rubric_associations \
<https://canvas.instructure.com/doc/api/rubrics.html#method.rubric_associations.create>`_
:returns: Returns a RubricAssociation.
:rtype: :class:`canvasapi.rubric.RubricAssociation`
"""
from canvasapi.rubric import RubricAssociation
response = self._requester.request(
"POST",
"courses/{}/rubric_associations".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
        response_json = response.json()
        response_json.update({"course_id": self.id})
        return RubricAssociation(self._requester, response_json)
def delete(self, **kwargs):
"""
Permanently delete this course.
:calls: `DELETE /api/v1/courses/:id \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.destroy>`_
:returns: True if the course was deleted, False otherwise.
:rtype: bool
"""
kwargs["event"] = "delete"
response = self._requester.request(
"DELETE",
"courses/{}".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json().get("delete")
def delete_external_feed(self, feed, **kwargs):
"""
Deletes the external feed.
:calls: `DELETE /api/v1/courses/:course_id/external_feeds/:external_feed_id \
<https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.destroy>`_
:param feed: The object or ID of the feed to be deleted.
:type feed: :class:`canvasapi.external_feed.ExternalFeed` or int
:rtype: :class:`canvasapi.external_feed.ExternalFeed`
"""
from canvasapi.external_feed import ExternalFeed
feed_id = obj_or_id(feed, "feed", (ExternalFeed,))
response = self._requester.request(
"DELETE",
"courses/{}/external_feeds/{}".format(self.id, feed_id),
_kwargs=combine_kwargs(**kwargs),
)
return ExternalFeed(self._requester, response.json())
def edit_front_page(self, **kwargs):
"""
Update the title or contents of the front page.
:calls: `PUT /api/v1/courses/:course_id/front_page \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.update_front_page>`_
        :rtype: :class:`canvasapi.page.Page`
"""
response = self._requester.request(
"PUT",
"courses/{}/front_page".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
page_json = response.json()
page_json.update({"course_id": self.id})
return Page(self._requester, page_json)
def edit_late_policy(self, **kwargs):
"""
Patch a late policy. No body is returned upon success.
:calls: `PATCH /api/v1/courses/:id/late_policy \
<https://canvas.instructure.com/doc/api/late_policy.html#method.late_policy.update>`_
:returns: True if Late Policy was updated successfully. False otherwise.
:rtype: bool
"""
response = self._requester.request(
"PATCH",
"courses/{}/late_policy".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.status_code == 204
def enroll_user(self, user, enrollment_type=None, **kwargs):
"""
Create a new user enrollment for a course or a section.
:calls: `POST /api/v1/courses/:course_id/enrollments \
<https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.create>`_
:param user: The object or ID of the user to enroll in this course.
:type user: :class:`canvasapi.user.User` or int
:param enrollment_type: The type of enrollment.
:type enrollment_type: str, optional
:rtype: :class:`canvasapi.enrollment.Enrollment`
"""
from canvasapi.enrollment import Enrollment
from canvasapi.user import User
kwargs["enrollment[user_id]"] = obj_or_id(user, "user", (User,))
if enrollment_type:
warnings.warn(
(
"The `enrollment_type` argument is deprecated and will be "
"removed in a future version.\n"
"Use `enrollment[type]` as a keyword argument instead. "
"e.g. `enroll_user(enrollment={'type': 'StudentEnrollment'})`"
),
DeprecationWarning,
)
kwargs["enrollment[type]"] = enrollment_type
response = self._requester.request(
"POST",
"courses/{}/enrollments".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Enrollment(self._requester, response.json())
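    # Usage sketch (hypothetical ``user`` object; the keyword form below
    # replaces the deprecated enrollment_type positional argument):
    #     enrollment = course.enroll_user(
    #         user,
    #         enrollment={"type": "StudentEnrollment", "enrollment_state": "active"})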
def export_content(self, export_type, **kwargs):
"""
Begin a content export job for a course.
:calls: `POST /api/v1/courses/:course_id/content_exports\
<https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.create>`_
:param export_type: The type of content to export.
:type export_type: str
:rtype: :class:`canvasapi.content_export.ContentExport`
"""
from canvasapi.content_export import ContentExport
kwargs["export_type"] = export_type
response = self._requester.request(
"POST",
"courses/{}/content_exports".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return ContentExport(self._requester, response.json())
def get_all_outcome_links_in_context(self, **kwargs):
"""
Get all outcome links for context - BETA
:calls: `GET /api/v1/courses/:course_id/outcome_group_links \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link_index>`_
        :returns: Paginated list of OutcomeLinks in the context.
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.outcome.OutcomeLink`
"""
from canvasapi.outcome import OutcomeLink
return PaginatedList(
OutcomeLink,
self._requester,
"GET",
"courses/{}/outcome_group_links".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_assignment(self, assignment, **kwargs):
"""
Return the assignment with the given ID.
:calls: `GET /api/v1/courses/:course_id/assignments/:id \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.show>`_
:param assignment: The object or ID of the assignment to retrieve.
:type assignment: :class:`canvasapi.assignment.Assignment` or int
:rtype: :class:`canvasapi.assignment.Assignment`
"""
from canvasapi.assignment import Assignment
assignment_id = obj_or_id(assignment, "assignment", (Assignment,))
response = self._requester.request(
"GET",
"courses/{}/assignments/{}".format(self.id, assignment_id),
_kwargs=combine_kwargs(**kwargs),
)
return Assignment(self._requester, response.json())
def get_assignment_group(self, assignment_group, **kwargs):
"""
Retrieve specified assignment group for the specified course.
:calls: `GET /api/v1/courses/:course_id/assignment_groups/:assignment_group_id \
<https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.show>`_
:param assignment_group: object or ID of assignment group.
:type assignment_group: :class:`canvasapi.assignment.AssignmentGroup` or int
:rtype: :class:`canvasapi.assignment.AssignmentGroup`
"""
from canvasapi.assignment import AssignmentGroup
assignment_group_id = obj_or_id(
assignment_group, "assignment_group", (AssignmentGroup,)
)
response = self._requester.request(
"GET",
"courses/{}/assignment_groups/{}".format(self.id, assignment_group_id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return AssignmentGroup(self._requester, response_json)
def get_assignment_groups(self, **kwargs):
"""
List assignment groups for the specified course.
:calls: `GET /api/v1/courses/:course_id/assignment_groups \
<https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.AssignmentGroup`
"""
from canvasapi.assignment import AssignmentGroup
return PaginatedList(
AssignmentGroup,
self._requester,
"GET",
"courses/{}/assignment_groups".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_assignment_overrides(self, assignment_overrides, **kwargs):
"""
        List the specified overrides in this course, provided they target
        sections/groups/students visible to the current user.
:calls: `GET /api/v1/courses/:course_id/assignments/overrides \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.batch_retrieve>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.AssignmentOverride`
"""
from canvasapi.assignment import AssignmentOverride
kwargs["assignment_overrides"] = assignment_overrides
return PaginatedList(
AssignmentOverride,
self._requester,
"GET",
"courses/{}/assignments/overrides".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_assignments(self, **kwargs):
"""
List all of the assignments in this course.
:calls: `GET /api/v1/courses/:course_id/assignments \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.Assignment`
"""
from canvasapi.assignment import Assignment
return PaginatedList(
Assignment,
self._requester,
"GET",
"courses/{}/assignments".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_assignments_for_group(self, assignment_group, **kwargs):
"""
Returns a paginated list of assignments for the given assignment group
:calls: `GET /api/v1/courses/:course_id/assignment_groups/:assignment_group_id/assignments\
<https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index>`_
:param assignment_group: The object or id of the assignment group
        :type assignment_group: :class:`canvasapi.assignment.AssignmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.Assignment`
"""
assignment_group_id = obj_or_id(
assignment_group, "assignment_group", (AssignmentGroup,)
)
return PaginatedList(
Assignment,
self._requester,
"GET",
"courses/{}/assignment_groups/{}/assignments".format(
self.id, assignment_group_id
),
_kwargs=combine_kwargs(**kwargs),
)
def get_blueprint(self, template="default", **kwargs):
"""
Return the blueprint of a given ID.
:calls: `GET /api/v1/courses/:course_id/blueprint_templates/:template_id \
<https://canvas.instructure.com/doc/api/blueprint_courses.html#method.master_courses/master_templates.show>`_
:param template: The object or ID of the blueprint template to get.
:type template: int or :class:`canvasapi.blueprint.BlueprintTemplate`
:rtype: :class:`canvasapi.blueprint.BlueprintTemplate`
"""
from canvasapi.blueprint import BlueprintTemplate
if template == "default":
template_id = template
else:
template_id = obj_or_id(template, "template", (BlueprintTemplate,))
response = self._requester.request(
"GET",
"courses/{}/blueprint_templates/{}".format(self.id, template_id),
_kwargs=combine_kwargs(**kwargs),
)
return BlueprintTemplate(self._requester, response.json())
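    # Usage sketch: with no argument the special "default" template is used;
    # otherwise pass a template id or BlueprintTemplate object (id 1 is
    # hypothetical):
    #     template = course.get_blueprint()
    #     template = course.get_blueprint(1)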
def get_collaborations(self, **kwargs):
"""
Return a list of collaborations for a given course ID.
:calls: `GET /api/v1/courses/:course_id/collaborations \
<https://canvas.instructure.com/doc/api/collaborations.html#method.collaborations.api_index>`_
:rtype: :class:`canvasapi.collaboration.Collaboration`
"""
return PaginatedList(
Collaboration,
self._requester,
"GET",
"courses/{}/collaborations".format(self.id),
_root="collaborations",
            _kwargs=combine_kwargs(**kwargs),
)
def get_content_export(self, content_export, **kwargs):
"""
Return information about a single content export.
:calls: `GET /api/v1/courses/:course_id/content_exports/:id\
<https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.show>`_
:param content_export: The object or ID of the content export to show.
:type content_export: int or :class:`canvasapi.content_export.ContentExport`
:rtype: :class:`canvasapi.content_export.ContentExport`
"""
from canvasapi.content_export import ContentExport
export_id = obj_or_id(content_export, "content_export", (ContentExport,))
response = self._requester.request(
"GET",
"courses/{}/content_exports/{}".format(self.id, export_id),
_kwargs=combine_kwargs(**kwargs),
)
return ContentExport(self._requester, response.json())
def get_content_exports(self, **kwargs):
"""
Return a paginated list of the past and pending content export jobs for a course.
:calls: `GET /api/v1/courses/:course_id/content_exports\
<https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_export.ContentExport`
"""
from canvasapi.content_export import ContentExport
return PaginatedList(
ContentExport,
self._requester,
"GET",
"courses/{}/content_exports".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
)
def get_content_migration(self, content_migration, **kwargs):
"""
        Retrieve a content migration by its ID.
:calls: `GET /api/v1/courses/:course_id/content_migrations/:id \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.show>`_
:param content_migration: The object or ID of the content migration to retrieve.
:type content_migration: int, str or :class:`canvasapi.content_migration.ContentMigration`
:rtype: :class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration
migration_id = obj_or_id(
content_migration, "content_migration", (ContentMigration,)
)
response = self._requester.request(
"GET",
"courses/{}/content_migrations/{}".format(self.id, migration_id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return ContentMigration(self._requester, response_json)
def get_content_migrations(self, **kwargs):
"""
List content migrations that the current account can view or manage.
:calls: `GET /api/v1/courses/:course_id/content_migrations/ \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_migration.ContentMigration`
"""
from canvasapi.content_migration import ContentMigration
return PaginatedList(
ContentMigration,
self._requester,
"GET",
"courses/{}/content_migrations".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_course_level_assignment_data(self, **kwargs):
"""
Return a list of assignments for the course sorted by due date
:calls: `GET /api/v1/courses/:course_id/analytics/assignments \
<https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_assignments>`_
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/analytics/assignments".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_course_level_participation_data(self, **kwargs):
"""
Return page view hits and participation numbers grouped by day through the course's history
:calls: `GET /api/v1/courses/:course_id/analytics/activity \
<https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_participation>`_
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/analytics/activity".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_course_level_student_summary_data(self, **kwargs):
"""
Return a summary of per-user access information for all students in a course
:calls: `GET /api/v1/courses/:course_id/analytics/student_summaries \
<https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_student_summaries>`_
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/analytics/student_summaries".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_custom_columns(self, **kwargs):
"""
List of all the custom gradebook columns for a course.
:calls: `GET /api/v1/courses/:course_id/custom_gradebook_columns \
<https://canvas.instructure.com/doc/api/custom_gradebook_columns.html#method.custom_gradebook_columns_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.custom_gradebook_columns.CustomGradebookColumn`
"""
return PaginatedList(
CustomGradebookColumn,
self._requester,
"GET",
"courses/{}/custom_gradebook_columns".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_discussion_topic(self, topic, **kwargs):
"""
Return data on an individual discussion topic.
:calls: `GET /api/v1/courses/:course_id/discussion_topics/:topic_id \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.show>`_
:param topic: The object or ID of the discussion topic.
:type topic: :class:`canvasapi.discussion_topic.DiscussionTopic` or int
:rtype: :class:`canvasapi.discussion_topic.DiscussionTopic`
"""
topic_id = obj_or_id(topic, "topic", (DiscussionTopic,))
response = self._requester.request(
"GET",
"courses/{}/discussion_topics/{}".format(self.id, topic_id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return DiscussionTopic(self._requester, response_json)
def get_discussion_topics(self, **kwargs):
"""
Returns the paginated list of discussion topics for this course or group.
:calls: `GET /api/v1/courses/:course_id/discussion_topics \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.discussion_topic.DiscussionTopic`
"""
return PaginatedList(
DiscussionTopic,
self._requester,
"GET",
"courses/{}/discussion_topics".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_enabled_features(self, **kwargs):
"""
Lists all enabled features in a course.
:calls: `GET /api/v1/courses/:course_id/features/enabled \
<https://canvas.instructure.com/doc/api/feature_flags.html#method.feature_flags.enabled_features>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.feature.Feature`
"""
return PaginatedList(
Feature,
self._requester,
"GET",
"courses/{}/features/enabled".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_enrollments(self, **kwargs):
"""
List all of the enrollments in this course.
:calls: `GET /api/v1/courses/:course_id/enrollments \
<https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.enrollment.Enrollment`
"""
from canvasapi.enrollment import Enrollment
return PaginatedList(
Enrollment,
self._requester,
"GET",
"courses/{}/enrollments".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_epub_export(self, epub, **kwargs):
"""
Get information about a single epub export.
:calls: `GET /api/v1/courses/:course_id/epub_exports/:id\
<https://canvas.instructure.com/doc/api/e_pub_exports.html#method.epub_exports.show>`_
:param epub: Object or ID of ePub Export
:type epub: int or :class:`canvasapi.course_epub_export.CourseEpubExport`
:rtype: :class:`canvasapi.course_epub_export.CourseEpubExport`
"""
epub_id = obj_or_id(epub, "epub", (CourseEpubExport,))
response = self._requester.request(
"GET",
"courses/{}/epub_exports/{}".format(self.id, epub_id),
_kwargs=combine_kwargs(**kwargs),
)
return CourseEpubExport(self._requester, response.json())
def get_external_feeds(self, **kwargs):
"""
Returns the list of External Feeds this course.
:calls: `GET /api/v1/courses/:course_id/external_feeds \
<https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.external_feed.ExternalFeed`
"""
from canvasapi.external_feed import ExternalFeed
return PaginatedList(
ExternalFeed,
self._requester,
"GET",
"courses/{}/external_feeds".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_external_tool(self, tool, **kwargs):
"""
:calls: `GET /api/v1/courses/:course_id/external_tools/:external_tool_id \
<https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.show>`_
:param tool: The object or ID of the tool to retrieve.
:type tool: :class:`canvasapi.external_tool.ExternalTool` or int
:rtype: :class:`canvasapi.external_tool.ExternalTool`
"""
from canvasapi.external_tool import ExternalTool
tool_id = obj_or_id(tool, "tool", (ExternalTool,))
response = self._requester.request(
"GET",
"courses/{}/external_tools/{}".format(self.id, tool_id),
_kwargs=combine_kwargs(**kwargs),
)
tool_json = response.json()
tool_json.update({"course_id": self.id})
return ExternalTool(self._requester, tool_json)
def get_external_tools(self, **kwargs):
"""
:calls: `GET /api/v1/courses/:course_id/external_tools \
<https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.external_tool.ExternalTool`
"""
from canvasapi.external_tool import ExternalTool
return PaginatedList(
ExternalTool,
self._requester,
"GET",
"courses/{}/external_tools".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_feature_flag(self, feature, **kwargs):
"""
Return the feature flag that applies to given course.
:calls: `GET /api/v1/courses/:course_id/features/flags/:feature \
<https://canvas.instructure.com/doc/api/feature_flags.html#method.feature_flags.show>`_
:param feature: The feature object or name of the feature to retrieve.
:type feature: :class:`canvasapi.feature.Feature` or str
:rtype: :class:`canvasapi.feature.FeatureFlag`
"""
feature_name = obj_or_str(feature, "name", (Feature,))
response = self._requester.request(
"GET",
"courses/{}/features/flags/{}".format(self.id, feature_name),
_kwargs=combine_kwargs(**kwargs),
)
return FeatureFlag(self._requester, response.json())
def get_features(self, **kwargs):
"""
Lists all features of a course.
:calls: `GET /api/v1/courses/:course_id/features \
<https://canvas.instructure.com/doc/api/feature_flags.html#method.feature_flags.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.feature.Feature`
"""
return PaginatedList(
Feature,
self._requester,
"GET",
"courses/{}/features".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_file(self, file, **kwargs):
"""
Return the standard attachment json object for a file.
:calls: `GET /api/v1/courses/:course_id/files/:id \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_show>`_
:param file: The object or ID of the file to retrieve.
:type file: :class:`canvasapi.file.File` or int
:rtype: :class:`canvasapi.file.File`
"""
from canvasapi.file import File
file_id = obj_or_id(file, "file", (File,))
response = self._requester.request(
"GET",
"courses/{}/files/{}".format(self.id, file_id),
_kwargs=combine_kwargs(**kwargs),
)
return File(self._requester, response.json())
def get_file_quota(self, **kwargs):
"""
Returns the total and used storage quota for the course.
:calls: `GET /api/v1/courses/:course_id/files/quota \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_quota>`_
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/files/quota".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_files(self, **kwargs):
"""
Returns the paginated list of files for the course.
:calls: `GET /api/v1/courses/:course_id/files \
<https://canvas.instructure.com/doc/api/files.html#method.files.api_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.file.File`
"""
from canvasapi.file import File
return PaginatedList(
File,
self._requester,
"GET",
"courses/{}/files".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_folder(self, folder, **kwargs):
"""
Returns the details for a course folder
:calls: `GET /api/v1/courses/:course_id/folders/:id \
<https://canvas.instructure.com/doc/api/files.html#method.folders.show>`_
:param folder: The object or ID of the folder to retrieve.
:type folder: :class:`canvasapi.folder.Folder` or int
:rtype: :class:`canvasapi.folder.Folder`
"""
folder_id = obj_or_id(folder, "folder", (Folder,))
response = self._requester.request(
"GET",
"courses/{}/folders/{}".format(self.id, folder_id),
_kwargs=combine_kwargs(**kwargs),
)
return Folder(self._requester, response.json())
def get_folders(self, **kwargs):
"""
Returns the paginated list of all folders for the given course. This will be returned as a
flat list containing all subfolders as well.
:calls: `GET /api/v1/courses/:course_id/folders \
<https://canvas.instructure.com/doc/api/files.html#method.folders.list_all_folders>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.folder.Folder`
"""
return PaginatedList(
Folder,
self._requester,
"GET",
"courses/{}/folders".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_full_discussion_topic(self, topic, **kwargs):
"""
Return a cached structure of the discussion topic.
:calls: `GET /api/v1/courses/:course_id/discussion_topics/:topic_id/view \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.view>`_
:param topic: The object or ID of the discussion topic.
:type topic: :class:`canvasapi.discussion_topic.DiscussionTopic` or int
:rtype: dict
"""
topic_id = obj_or_id(topic, "topic", (DiscussionTopic,))
response = self._requester.request(
"GET",
"courses/{}/discussion_topics/{}/view".format(self.id, topic_id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_gradebook_history_dates(self, **kwargs):
"""
Returns a map of dates to grader/assignment groups
:calls: `GET /api/v1/courses/:course_id/gradebook_history/days\
<https://canvas.instructure.com/doc/api/gradebook_history.html#method.gradebook_history_api.days>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.gradebook_history.Day`
"""
return PaginatedList(
Day,
self._requester,
"GET",
"courses/{}/gradebook_history/days".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_gradebook_history_details(self, date, **kwargs):
"""
Returns the graders who worked on this day, along with the
assignments they worked on. More details can be obtained by
selecting a grader and assignment and calling the 'submissions'
api endpoint for a given date.
:calls: `GET /api/v1/courses/:course_id/gradebook_history/:date\
<https://canvas.instructure.com/doc/api/gradebook_history.html#method.\
gradebook_history_api.day_details>`_
:param date: The date for which you would like to see detailed information.
:type date: int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.gradebook_history.Grader`
"""
return PaginatedList(
Grader,
self._requester,
"GET",
"courses/{}/gradebook_history/{}".format(self.id, date),
            _kwargs=combine_kwargs(**kwargs),
)
def get_grading_period(self, grading_period, **kwargs):
"""
Return a single grading period for the associated course and id.
:calls: `GET /api/v1/courses/:course_id/grading_periods/:id\
<https://canvas.instructure.com/doc/api/grading_periods.html#method.grading_periods.index>`_
        :param grading_period: The ID of the grading period.
        :type grading_period: int
:rtype: :class:`canvasapi.grading_period.GradingPeriod`
"""
response = self._requester.request(
"GET",
"courses/{}/grading_periods/{}".format(self.id, grading_period),
_kwargs=combine_kwargs(**kwargs),
)
response_grading_period = response.json()["grading_periods"][0]
response_grading_period.update({"course_id": self.id})
return GradingPeriod(self._requester, response_grading_period)
def get_grading_periods(self, **kwargs):
"""
Return a list of grading periods for the associated course.
:calls: `GET /api/v1/courses/:course_id/grading_periods\
<https://canvas.instructure.com/doc/api/grading_periods.html#method.grading_periods.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.grading_period.GradingPeriod`
"""
return PaginatedList(
GradingPeriod,
self._requester,
"GET",
"courses/{}/grading_periods".format(self.id),
{"course_id": self.id},
_root="grading_periods",
            _kwargs=combine_kwargs(**kwargs),
)
def get_grading_standards(self, **kwargs):
"""
Get a PaginatedList of the grading standards available for the course
:calls: `GET /api/v1/courses/:course_id/grading_standards \
<https://canvas.instructure.com/doc/api/grading_standards.html#method.grading_standards_api.context_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.grading_standards.GradingStandard`
"""
return PaginatedList(
GradingStandard,
self._requester,
"GET",
"courses/%s/grading_standards" % (self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_group_categories(self, **kwargs):
"""
List group categories for a context.
:calls: `GET /api/v1/courses/:course_id/group_categories \
<https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.group.GroupCategory`
"""
from canvasapi.group import GroupCategory
return PaginatedList(
GroupCategory,
self._requester,
"GET",
"courses/{}/group_categories".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_groups(self, **kwargs):
"""
Return list of active groups for the specified course.
:calls: `GET /api/v1/courses/:course_id/groups \
<https://canvas.instructure.com/doc/api/groups.html#method.groups.context_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.group.Group`
"""
from canvasapi.group import Group
return PaginatedList(
Group,
self._requester,
"GET",
"courses/{}/groups".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_late_policy(self, **kwargs):
"""
Returns the late policy for a course.
:calls: `GET /api/v1/courses/:id/late_policy \
<https://canvas.instructure.com/doc/api/late_policy.html#method.late_policy.show>`_
:rtype: :class:`canvasapi.course.LatePolicy`
"""
response = self._requester.request(
"GET",
"courses/{}/late_policy".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
late_policy_json = response.json()
return LatePolicy(self._requester, late_policy_json["late_policy"])
def get_licenses(self, **kwargs):
"""
Returns a paginated list of the licenses that can be applied to the
files under the course scope
:calls: `GET /api/v1/course/:course_id/content_licenses \
<https://canvas.instructure.com/doc/api/files.html#method.usage_rights.licenses>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.license.License`
"""
return PaginatedList(
License,
self._requester,
"GET",
"courses/{}/content_licenses".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_migration_systems(self, **kwargs):
"""
Return a list of migration systems.
:calls: `GET /api/v1/courses/:course_id/content_migrations/migrators \
<https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.available_migrators>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.content_migration.Migrator`
"""
from canvasapi.content_migration import Migrator
return PaginatedList(
Migrator,
self._requester,
"GET",
"courses/{}/content_migrations/migrators".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_module(self, module, **kwargs):
"""
Retrieve a single module by ID.
:calls: `GET /api/v1/courses/:course_id/modules/:id \
<https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.show>`_
:param module: The object or ID of the module to retrieve.
:type module: :class:`canvasapi.module.Module` or int
:rtype: :class:`canvasapi.module.Module`
"""
from canvasapi.module import Module
module_id = obj_or_id(module, "module", (Module,))
response = self._requester.request(
"GET", "courses/{}/modules/{}".format(self.id, module_id)
)
module_json = response.json()
module_json.update({"course_id": self.id})
return Module(self._requester, module_json)
def get_modules(self, **kwargs):
"""
Return a list of modules in this course.
:calls: `GET /api/v1/courses/:course_id/modules \
<https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.module.Module`
"""
from canvasapi.module import Module
return PaginatedList(
Module,
self._requester,
"GET",
"courses/{}/modules".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_multiple_submissions(self, **kwargs):
"""
List submissions for multiple assignments.
Get all existing submissions for a given set of students and assignments.
:calls: `GET /api/v1/courses/:course_id/students/submissions \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.submission.Submission`
"""
is_grouped = kwargs.get("grouped", False)
if normalize_bool(is_grouped, "grouped"):
cls = GroupedSubmission
else:
cls = Submission
return PaginatedList(
cls,
self._requester,
"GET",
"courses/{}/students/submissions".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_outcome_group(self, group, **kwargs):
"""
Returns the details of the Outcome Group with the given id.
:calls: `GET /api/v1/courses/:course_id/outcome_groups/:id \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_
:param group: The outcome group object or ID to return.
:type group: :class:`canvasapi.outcome.OutcomeGroup` or int
:returns: An outcome group object.
:rtype: :class:`canvasapi.outcome.OutcomeGroup`
"""
from canvasapi.outcome import OutcomeGroup
outcome_group_id = obj_or_id(group, "group", (OutcomeGroup,))
response = self._requester.request(
"GET", "courses/{}/outcome_groups/{}".format(self.id, outcome_group_id)
)
return OutcomeGroup(self._requester, response.json())
def get_outcome_groups_in_context(self, **kwargs):
"""
Get all outcome groups for context - BETA
:calls: `GET /api/v1/courses/:course_id/outcome_groups \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.index>`_
        :returns: Paginated List of OutcomeGroups in the context.
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.outcome.OutcomeGroup`
"""
from canvasapi.outcome import OutcomeGroup
return PaginatedList(
OutcomeGroup,
self._requester,
"GET",
"courses/{}/outcome_groups".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_outcome_import_status(self, outcome_import, **kwargs):
"""
Get the status of an already created Outcome import.
Pass 'latest' for the outcome import id for the latest import.
:calls: `GET /api/v1/courses/:course_id/outcome_imports/:id \
<https://canvas.instructure.com/doc/api/outcome_imports.html#method.outcome_imports_api.show>`_
:param outcome_import: The outcome import object or ID to get the status of.
:type outcome_import: :class:`canvasapi.outcome_import.OutcomeImport`,
int, or string: "latest"
:rtype: :class:`canvasapi.outcome_import.OutcomeImport`
"""
if outcome_import == "latest":
outcome_import_id = "latest"
else:
outcome_import_id = obj_or_id(
outcome_import, "outcome_import", (OutcomeImport,)
)
response = self._requester.request(
"GET",
"courses/{}/outcome_imports/{}".format(self.id, outcome_import_id),
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return OutcomeImport(self._requester, response_json)
def get_outcome_result_rollups(self, **kwargs):
"""
Get all outcome result rollups for context - BETA
:calls: `GET /api/v1/courses/:course_id/outcome_rollups \
<https://canvas.instructure.com/doc/api/outcome_results.html#method.outcome_results.rollups>`_
:returns: List of outcome result rollups in the context.
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/outcome_rollups".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_outcome_results(self, **kwargs):
"""
Get all outcome results for context - BETA
:calls: `GET /api/v1/courses/:course_id/outcome_results \
<https://canvas.instructure.com/doc/api/outcome_results.html#method.outcome_results.index>`_
:returns: List of potential related outcome result dicts.
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/outcome_results".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_page(self, url, **kwargs):
"""
Retrieve the contents of a wiki page.
:calls: `GET /api/v1/courses/:course_id/pages/:url \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.show>`_
:param url: The url for the page.
:type url: str
:returns: The specified page.
:rtype: :class:`canvasapi.page.Page`
"""
response = self._requester.request(
"GET",
"courses/{}/pages/{}".format(self.id, url),
_kwargs=combine_kwargs(**kwargs),
)
page_json = response.json()
page_json.update({"course_id": self.id})
return Page(self._requester, page_json)
def get_pages(self, **kwargs):
"""
List the wiki pages associated with a course.
:calls: `GET /api/v1/courses/:course_id/pages \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.page.Page`
"""
return PaginatedList(
Page,
self._requester,
"GET",
"courses/{}/pages".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_quiz(self, quiz, **kwargs):
"""
Return the quiz with the given id.
:calls: `GET /api/v1/courses/:course_id/quizzes/:id \
<https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.show>`_
:param quiz: The object or ID of the quiz to retrieve.
:type quiz: :class:`canvasapi.quiz.Quiz` or int
:rtype: :class:`canvasapi.quiz.Quiz`
"""
from canvasapi.quiz import Quiz
quiz_id = obj_or_id(quiz, "quiz", (Quiz,))
response = self._requester.request(
"GET",
"courses/{}/quizzes/{}".format(self.id, quiz_id),
_kwargs=combine_kwargs(**kwargs),
)
quiz_json = response.json()
quiz_json.update({"course_id": self.id})
return Quiz(self._requester, quiz_json)
def get_quiz_overrides(self, **kwargs):
"""
Retrieve the actual due-at, unlock-at,
and available-at dates for quizzes based on
the assignment overrides active for the current API user.
:calls: `GET /api/v1/courses/:course_id/quizzes/assignment_overrides \
<https://canvas.instructure.com/doc/api/quiz_assignment_overrides.html#method.quizzes/quiz_assignment_overrides.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.quiz.QuizAssignmentOverrideSet`
"""
from canvasapi.quiz import QuizAssignmentOverrideSet
return PaginatedList(
QuizAssignmentOverrideSet,
self._requester,
"GET",
"courses/{}/quizzes/assignment_overrides".format(self.id),
_root="quiz_assignment_overrides",
_kwargs=combine_kwargs(**kwargs),
)
def get_quizzes(self, **kwargs):
"""
Return a list of quizzes belonging to this course.
:calls: `GET /api/v1/courses/:course_id/quizzes \
<https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.quiz.Quiz`
"""
from canvasapi.quiz import Quiz
return PaginatedList(
Quiz,
self._requester,
"GET",
"courses/{}/quizzes".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_recent_students(self, **kwargs):
"""
Return a list of students in the course ordered by how recently they
have logged in.
:calls: `GET /api/v1/courses/:course_id/recent_students \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.recent_students>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.user.User`
"""
from canvasapi.user import User
return PaginatedList(
User,
self._requester,
"GET",
"courses/{}/recent_students".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_root_outcome_group(self, **kwargs):
"""
Redirect to root outcome group for context
:calls: `GET /api/v1/courses/:course_id/root_outcome_group \
<https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.redirect>`_
:returns: The OutcomeGroup of the context.
:rtype: :class:`canvasapi.outcome.OutcomeGroup`
"""
from canvasapi.outcome import OutcomeGroup
response = self._requester.request(
"GET",
"courses/{}/root_outcome_group".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return OutcomeGroup(self._requester, response.json())
def get_rubric(self, rubric_id, **kwargs):
"""
Get a single rubric, based on rubric id.
:calls: `GET /api/v1/courses/:course_id/rubrics/:id \
<https://canvas.instructure.com/doc/api/rubrics.html#method.rubrics_api.show>`_
:param rubric_id: The ID of the rubric.
:type rubric_id: int
:rtype: :class:`canvasapi.rubric.Rubric`
"""
response = self._requester.request(
"GET",
"courses/%s/rubrics/%s" % (self.id, rubric_id),
_kwargs=combine_kwargs(**kwargs),
)
return Rubric(self._requester, response.json())
def get_rubrics(self, **kwargs):
"""
Get the paginated list of active rubrics for the current course.
:calls: `GET /api/v1/courses/:course_id/rubrics \
<https://canvas.instructure.com/doc/api/rubrics.html#method.rubrics_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.rubric.Rubric`
"""
return PaginatedList(
Rubric,
self._requester,
"GET",
"courses/%s/rubrics" % (self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_section(self, section, **kwargs):
"""
Retrieve a section.
:calls: `GET /api/v1/courses/:course_id/sections/:id \
<https://canvas.instructure.com/doc/api/sections.html#method.sections.show>`_
:param section: The object or ID of the section to retrieve.
:type section: :class:`canvasapi.section.Section` or int
:rtype: :class:`canvasapi.section.Section`
"""
from canvasapi.section import Section
section_id = obj_or_id(section, "section", (Section,))
response = self._requester.request(
"GET",
"courses/{}/sections/{}".format(self.id, section_id),
_kwargs=combine_kwargs(**kwargs),
)
return Section(self._requester, response.json())
def get_sections(self, **kwargs):
"""
List all sections in a course.
:calls: `GET /api/v1/courses/:course_id/sections \
<https://canvas.instructure.com/doc/api/sections.html#method.sections.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.section.Section`
"""
from canvasapi.section import Section
return PaginatedList(
Section,
self._requester,
"GET",
"courses/{}/sections".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_settings(self, **kwargs):
"""
Returns this course's settings.
:calls: `GET /api/v1/courses/:course_id/settings \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.settings>`_
:rtype: dict
"""
response = self._requester.request(
"GET",
"courses/{}/settings".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_single_grading_standard(self, grading_standard_id, **kwargs):
"""
Get a single grading standard from the course.
:calls: `GET /api/v1/courses/:course_id/grading_standards/:grading_standard_id \
<https://canvas.instructure.com/doc/api/grading_standards.html#method.grading_standards_api.context_show>`_
:param grading_standard_id: The grading standard id
:type grading_standard_id: int
:rtype: :class:`canvasapi.grading_standards.GradingStandard`
"""
response = self._requester.request(
"GET",
"courses/%s/grading_standards/%d" % (self.id, grading_standard_id),
_kwargs=combine_kwargs(**kwargs),
)
return GradingStandard(self._requester, response.json())
def get_submission_history(self, date, grader_id, assignment_id, **kwargs):
"""
Gives a nested list of submission versions.
:calls: `GET /api/v1/courses/:course_id/gradebook_history/:date/graders\
/:grader_id/assignments/:assignment_id/submissions\
<https://canvas.instructure.com/doc/api/gradebook_history.html#method.\
gradebook_history_api.submissions>`_
        :param date: The date for which you would like to see submissions.
        :type date: str
:param grader_id: The ID of the grader for which you want to see submissions.
:type grader_id: int
:param assignment_id: The ID of the assignment for which you want to see submissions
:type assignment_id: int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.gradebook_history.SubmissionHistory`
"""
return PaginatedList(
SubmissionHistory,
self._requester,
"GET",
"courses/{}/gradebook_history/{}/graders/{}/assignments/{}/submissions".format(
self.id, date, grader_id, assignment_id
),
            _kwargs=combine_kwargs(**kwargs),
)
def get_tabs(self, **kwargs):
"""
List available tabs for a course.
Returns a list of navigation tabs available in the current context.
:calls: `GET /api/v1/courses/:course_id/tabs \
<https://canvas.instructure.com/doc/api/tabs.html#method.tabs.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.tab.Tab`
"""
return PaginatedList(
Tab,
self._requester,
"GET",
"courses/{}/tabs".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def get_todo_items(self, **kwargs):
"""
Returns the current user's course-specific todo items.
:calls: `GET /api/v1/courses/:course_id/todo \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.todo_items>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.todo.Todo`
"""
return PaginatedList(
Todo,
self._requester,
"GET",
"courses/{}/todo".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def get_uncollated_submissions(self, **kwargs):
"""
Gives a paginated, uncollated list of submission versions for all matching
        submissions in the context. These SubmissionVersion objects will not include
        the new_grade or previous_grade keys, only the grade; likewise for graded_at
        and grader.
:calls: `GET /api/v1/courses/:course_id/gradebook_history/feed\
<https://canvas.instructure.com/doc/api/gradebook_history.html#method\
.gradebook_history_api.feed>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.gradebook_history.SubmissionVersion`
"""
return PaginatedList(
SubmissionVersion,
self._requester,
"GET",
"courses/{}/gradebook_history/feed".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
)
def get_user(self, user, user_id_type=None, **kwargs):
"""
Retrieve a user by their ID. `user_id_type` denotes which endpoint to try as there are
several different ids that can pull the same user record from Canvas.
:calls: `GET /api/v1/courses/:course_id/users/:id \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.user>`_
:param user: The object or ID of the user to retrieve.
:type user: :class:`canvasapi.user.User` or int
:param user_id_type: The type of the ID to search for.
:type user_id_type: str
:rtype: :class:`canvasapi.user.User`
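        Example Usage ('sis_login_id' is one of Canvas's SIS ID types; the
        IDs below are hypothetical):
        >>> course.get_user(1234)
        >>> course.get_user('jdoe', 'sis_login_id')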
"""
from canvasapi.user import User
if user_id_type:
uri = "courses/{}/users/{}:{}".format(self.id, user_id_type, user)
else:
user_id = obj_or_id(user, "user", (User,))
uri = "courses/{}/users/{}".format(self.id, user_id)
response = self._requester.request("GET", uri, _kwargs=combine_kwargs(**kwargs))
return User(self._requester, response.json())
def get_user_in_a_course_level_assignment_data(self, user, **kwargs):
"""
Return a list of assignments for the course sorted by due date
:calls: `GET /api/v1/courses/:course_id/analytics/users/:student_id/assignments \
<https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.student_in_course_assignments>`_
:param user: The object or ID of the related user
:type user: :class:`canvasapi.user.User` or int
:rtype: dict
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"GET",
"courses/{}/analytics/users/{}/assignments".format(self.id, user_id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_user_in_a_course_level_messaging_data(self, user, **kwargs):
"""
Return messaging hits grouped by day through the entire history of the course
:calls: `GET /api/v1/courses/:course_id/analytics/users/:student_id/communication \
<https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.student_in_course_messaging>`_
:param user: The object or ID of the related user
:type user: :class:`canvasapi.user.User` or int
:rtype: dict
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"GET",
"courses/{}/analytics/users/{}/communication".format(self.id, user_id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_user_in_a_course_level_participation_data(self, user, **kwargs):
"""
Return page view hits grouped by hour and participation details through course's history
:calls: `GET /api/v1/courses/:course_id/analytics/users/:student_id/activity \
<https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.student_in_course_participation>`_
:param user: The object or ID of the related user
:type user: :class:`canvasapi.user.User` or int
:rtype: dict
"""
from canvasapi.user import User
user_id = obj_or_id(user, "user", (User,))
response = self._requester.request(
"GET",
"courses/{}/analytics/users/{}/activity".format(self.id, user_id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def get_users(self, **kwargs):
"""
List all users in a course.
:calls: `GET /api/v1/courses/:course_id/search_users \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.users>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.user.User`
"""
from canvasapi.user import User
return PaginatedList(
User,
self._requester,
"GET",
"courses/{}/search_users".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def import_outcome(self, attachment, **kwargs):
"""
Import outcome into canvas.
:calls: `POST /api/v1/courses/:course_id/outcome_imports \
<https://canvas.instructure.com/doc/api/outcome_imports.html#method.outcome_imports_api.create>`_
:param attachment: A file handler or path of the file to import.
:type attachment: file or str
:rtype: :class:`canvasapi.outcome_import.OutcomeImport`
"""
attachment, is_path = file_or_path(attachment)
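        # file_or_path() opens the file itself when given a path; the
        # finally block below closes it in that case, even if the POST fails.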
try:
response = self._requester.request(
"POST",
"courses/{}/outcome_imports".format(self.id),
file={"attachment": attachment},
_kwargs=combine_kwargs(**kwargs),
)
response_json = response.json()
response_json.update({"course_id": self.id})
return OutcomeImport(self._requester, response_json)
finally:
if is_path:
attachment.close()
def list_blueprint_subscriptions(self, **kwargs):
"""
Return a list of blueprint subscriptions for the given course.
:calls: `GET /api/v1/courses/:course_id/blueprint_subscriptions\
<https://canvas.instructure.com/doc/api/blueprint_courses.html#method.\
master_courses/master_templates.subscriptions_index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.blueprint.BlueprintSubscription`
"""
return PaginatedList(
BlueprintSubscription,
self._requester,
"GET",
"courses/{}/blueprint_subscriptions".format(self.id),
{"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
)
def preview_html(self, html, **kwargs):
"""
Preview HTML content processed for this course.
:calls: `POST /api/v1/courses/:course_id/preview_html \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.preview_html>`_
:param html: The HTML code to preview.
:type html: str
:rtype: str
"""
kwargs["html"] = html
response = self._requester.request(
"POST",
"courses/{}/preview_html".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json().get("html", "")
def remove_usage_rights(self, **kwargs):
"""
Removes the usage rights for specified files that are under the current course scope
:calls: `DELETE /api/v1/courses/:course_id/usage_rights \
<https://canvas.instructure.com/doc/api/files.html#method.usage_rights.remove_usage_rights>`_
:rtype: dict
"""
response = self._requester.request(
"DELETE",
"courses/{}/usage_rights".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json()
def reorder_pinned_topics(self, order, **kwargs):
"""
Puts the pinned discussion topics in the specified order.
All pinned topics should be included.
:calls: `POST /api/v1/courses/:course_id/discussion_topics/reorder \
<https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.reorder>`_
:param order: The ids of the pinned discussion topics in the desired order.
e.g. [104, 102, 103], (104, 102, 103), or "104,102,103"
:type order: string or iterable sequence of values
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.discussion_topic.DiscussionTopic`
"""
# Convert iterable sequence to comma-separated string
if is_multivalued(order):
order = ",".join([str(topic_id) for topic_id in order])
        # Ensure `order` is now a comma-separated string
if not isinstance(order, str) or "," not in order:
raise ValueError("Param `order` must be a list, tuple, or string.")
kwargs["order"] = order
response = self._requester.request(
"POST",
"courses/{}/discussion_topics/reorder".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return response.json().get("reorder")
def reset(self, **kwargs):
"""
Delete the current course and create a new equivalent course
with no content, but all sections and users moved over.
:calls: `POST /api/v1/courses/:course_id/reset_content \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.reset_content>`_
:rtype: :class:`canvasapi.course.Course`
"""
response = self._requester.request(
"POST",
"courses/{}/reset_content".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Course(self._requester, response.json())
def resolve_path(self, full_path=None, **kwargs):
"""
Returns the paginated list of all of the folders in the given
path starting at the course root folder. Returns root folder if called
with no arguments.
:calls: `GET /api/v1/courses/:course_id/folders/by_path/*full_path \
<https://canvas.instructure.com/doc/api/files.html#method.folders.resolve_path>`_
:param full_path: Full path to resolve, relative to course root.
:type full_path: string
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.folder.Folder`
"""
if full_path:
return PaginatedList(
Folder,
self._requester,
"GET",
"courses/{0}/folders/by_path/{1}".format(self.id, full_path),
_kwargs=combine_kwargs(**kwargs),
)
else:
return PaginatedList(
Folder,
self._requester,
"GET",
"courses/{0}/folders/by_path".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
def set_quiz_extensions(self, quiz_extensions, **kwargs):
"""
        Set extensions for all student quiz submissions in a course.
        :calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/extensions \
<https://canvas.instructure.com/doc/api/quiz_extensions.html#method.quizzes/quiz_extensions.create>`_
:param quiz_extensions: List of dictionaries representing extensions.
:type quiz_extensions: list
:rtype: list of :class:`canvasapi.quiz.QuizExtension`
Example Usage:
>>> course.set_quiz_extensions([
... {
... 'user_id': 1,
... 'extra_time': 60,
... 'extra_attempts': 1
... },
... {
... 'user_id': 2,
... 'extra_attempts': 3
... },
... {
... 'user_id': 3,
... 'extra_time': 20
... }
... ])
"""
if not isinstance(quiz_extensions, list) or not quiz_extensions:
raise ValueError("Param `quiz_extensions` must be a non-empty list.")
if any(not isinstance(extension, dict) for extension in quiz_extensions):
raise ValueError("Param `quiz_extensions` must only contain dictionaries")
if any("user_id" not in extension for extension in quiz_extensions):
raise RequiredFieldMissing(
"Dictionaries in `quiz_extensions` must contain key `user_id`"
)
kwargs["quiz_extensions"] = quiz_extensions
response = self._requester.request(
"POST",
"courses/{}/quiz_extensions".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
extension_list = response.json()["quiz_extensions"]
return [
QuizExtension(self._requester, extension) for extension in extension_list
]
def set_usage_rights(self, **kwargs):
"""
Changes the usage rights for specified files that are under the current course scope
:calls: `PUT /api/v1/courses/:course_id/usage_rights \
<https://canvas.instructure.com/doc/api/files.html#method.usage_rights.set_usage_rights>`_
:rtype: :class:`canvasapi.usage_rights.UsageRights`
"""
response = self._requester.request(
"PUT",
"courses/{}/usage_rights".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return UsageRights(self._requester, response.json())
def show_front_page(self, **kwargs):
"""
Retrieve the content of the front page.
:calls: `GET /api/v1/courses/:course_id/front_page \
<https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.show_front_page>`_
:rtype: :class:`canvasapi.course.Course`
"""
response = self._requester.request(
"GET",
"courses/{}/front_page".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
page_json = response.json()
page_json.update({"course_id": self.id})
return Page(self._requester, page_json)
def submissions_bulk_update(self, **kwargs):
"""
        Update the grading and comments on multiple students' assignment
submissions in an asynchronous job.
:calls: `POST /api/v1/courses/:course_id/submissions/update_grades \
<https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.bulk_update>`_
:rtype: :class:`canvasapi.progress.Progress`
"""
response = self._requester.request(
"POST",
"courses/{}/submissions/update_grades".format(self.id),
_kwargs=combine_kwargs(**kwargs),
)
return Progress(self._requester, response.json())
def update(self, **kwargs):
"""
Update this course.
:calls: `PUT /api/v1/courses/:id \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.update>`_
        :returns: The updated course name if the update succeeded,
            otherwise ``None``.
        :rtype: str or None
"""
response = self._requester.request(
"PUT", "courses/{}".format(self.id), _kwargs=combine_kwargs(**kwargs)
)
if response.json().get("name"):
super(Course, self).set_attributes(response.json())
return response.json().get("name")
def update_assignment_overrides(self, assignment_overrides, **kwargs):
"""
Update a list of specified overrides for each assignment.
Note: All current overridden values must be supplied if they are to be retained.
:calls: `PUT /api/v1/courses/:course_id/assignments/overrides \
<https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.batch_update>`_
:param assignment_overrides: Attributes for the updated assignment overrides.
:type assignment_overrides: list
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.assignment.AssignmentOverride`
"""
from canvasapi.assignment import AssignmentOverride
kwargs["assignment_overrides"] = assignment_overrides
return PaginatedList(
AssignmentOverride,
self._requester,
"PUT",
"courses/{}/assignments/overrides".format(self.id),
{"course_id": self.id},
_kwargs=combine_kwargs(**kwargs),
)
def update_settings(self, **kwargs):
"""
Update a course's settings.
:calls: `PUT /api/v1/courses/:course_id/settings \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.update_settings>`_
:rtype: dict
"""
response = self._requester.request(
"PUT", "courses/{}/settings".format(self.id), **kwargs
)
return response.json()
def upload(self, file: FileOrPathLike, **kwargs):
"""
Upload a file to this course.
:calls: `POST /api/v1/courses/:course_id/files \
<https://canvas.instructure.com/doc/api/courses.html#method.courses.create_file>`_
:param file: The file or path of the file to upload.
:type file: file or str
:returns: True if the file uploaded successfully, False otherwise, \
and the JSON response from the API.
:rtype: tuple
"""
return Uploader(
self._requester, "courses/{}/files".format(self.id), file, **kwargs
).start()
class CourseNickname(CanvasObject):
def __str__(self):
return "{} ({})".format(self.nickname, self.course_id)
def remove(self, **kwargs):
"""
Remove the nickname for the given course. Subsequent course API
calls will return the actual name for the course.
:calls: `DELETE /api/v1/users/self/course_nicknames/:course_id \
<https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.delete>`_
:rtype: :class:`canvasapi.course.CourseNickname`
"""
response = self._requester.request(
"DELETE",
"users/self/course_nicknames/{}".format(self.course_id),
_kwargs=combine_kwargs(**kwargs),
)
return CourseNickname(self._requester, response.json())
class LatePolicy(CanvasObject):
def __str__(self):
return "Late Policy {}".format(self.id)
| mit | 1,574,006,498,719,888,100 | 34.499625 | 132 | 0.601858 | false |
remeeting/mrp-score | match_ref_hyp.py | 1 | 7218 | #!/usr/bin/env python
######################################################################
#
# File: match_ref_hyp.py
# Author: Adam Janin
# Feb 23, 2017
#
# Given a reference file and a hypothesis file, figure out which parts
# of the hypotheses match which part of the reference. Output the
# hypotheses file as stm, where the non-text lines are taken
# from the matching lines of the reference (except where there's
# no matching reference, where the speaker field will be "unassigned").
#
# Currently, only stm for reference and ctm for hypothesis files are
# supported. The algorithm currently mimics sclite; the midpoint of a
# ctm word is used to determine its time, and it is assigned to the
# first stm segment it's within.
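#
# For example (made-up times): a ctm word starting at 11.80s with duration
# 0.50s has midtime 12.05s, so it is assigned to an stm segment spanning
# 12.00-15.00s rather than to one ending at 12.00s, even though the word
# begins inside the earlier segment.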
#
# This code can be used either as a stand-alone program, or it can
# be imported. See function match_ref_hyp()
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. The
# License may be found in the file LICENSE at the top level of the
# repository / directory tree.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import print_function
from six.moves import input
import six
import argparse
from bisect import bisect
from collections import defaultdict
import logging
from operator import methodcaller
import sys
import utils
VERSION = 0.1
class Global:
'''Stores globals. There should be no instances of Global.'''
# Command line arguments
args = None
# Two times that are less than this are considered identical.
epstime = 0.005
# end class Global
def main(argv):
parse_arguments(argv[1:])
setup_logging()
instm = utils.read_stm(Global.args.ref) # List of STM_Line
inctm = utils.read_ctm(Global.args.inhyp) # List of CTM_Line
outhyp = match_ref_hyp(instm, inctm) # List of STM_Line
for line in outhyp:
Global.args.outhyp.write(str(line))
Global.args.outhyp.write('\n')
# end main()
def match_ref_hyp(instm, inctm):
"""Given a list of references of type STM_Line and a list of hypotheses of type CTM_Line, return a list of type STM_Line where each word in the input ctm is matched (in time) with a corresponding entry in the input stm file. If a word doesn't match any line in the input stm, it is given a new entry with speaker "unmatched" (since the input ctm doesn't specify speaker)."""
outhyps = []
# Go through the ctms, storing each file/channel in a separately
# sorted list.
# Map of e.g. 'sw_4390 A' -> list of CTM_Line sorted by midtime
sorted_ctms = defaultdict(list)
# Map of e.g. 'sw_4390 A' -> list of booleans, False if the ctm hasn't been assigned
# to an stm line, True otherwise.
assigned = {}
# The midtimes of sorted_ctms in the same order.
sorted_ctm_midtimes = {}
# Store the ctms by file/channel
for ctm in inctm:
sorted_ctms['%s %s'%(ctm.file, ctm.channel)].append(ctm)
# Sort them by midtime.
for key in sorted_ctms:
ctmlist = sorted_ctms[key]
ctmlist.sort(key=methodcaller('midtime'))
sorted_ctm_midtimes[key] = [x.midtime() for x in ctmlist]
assigned[key] = [False]*len(ctmlist)
for ref in instm:
outwords = []
filechannel = '%s %s'%(ref.file, ref.channel)
if filechannel not in sorted_ctm_midtimes:
continue
cur_ctm_midtimes = sorted_ctm_midtimes[filechannel]
cur_ctms = sorted_ctms[filechannel]
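        # bisect() returns the index of the first midtime greater than
        # ref.starttime, so we only scan ctm words from there until one
        # falls past ref.endtime.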
for ii in range(bisect(cur_ctm_midtimes, ref.starttime), len(cur_ctm_midtimes)):
hyptime = cur_ctm_midtimes[ii]
hyp = cur_ctms[ii]
if hyptime < ref.starttime or hyptime > ref.endtime:
break
else:
if assigned[filechannel][ii]:
# If it's only barely assigned to this segment, don't report the error.
if abs(hyptime - ref.starttime) > Global.epstime and abs(hyptime - ref.endtime) > Global.epstime:
logging.warning("Warning: Found hypothesis that matches more than one stm line. This indicates that the stm contains overlapping segments. The ctm word has been assigned to the first segment. The ctm entry was:\n%s\nThe second stm entry was:\n%s\n"%(hyp, ref))
else:
assigned[filechannel][ii] = True
outwords.append(hyp.word)
# Make a copy of the corresponding ref line, and replace the words.
outhyp = utils.STM_Line(str(ref))
outhyp.words = ' '.join(outwords)
outhyps.append(outhyp)
# Now find any ctms that were not assigned to an stm.
outwords = []
for filechannel in sorted_ctms:
for ii in range(len(sorted_ctms[filechannel])):
if not assigned[filechannel][ii]:
hyp = sorted_ctms[filechannel][ii]
outhyp = utils.STM_Line()
outhyp.file = hyp.file
outhyp.channel = hyp.channel
outhyp.speaker = 'unassigned'
outhyp.starttime = hyp.starttime
outhyp.endtime = hyp.starttime + hyp.duration
outhyp.label = '<>'
outhyp.words = hyp.word
outhyps.append(outhyp)
return outhyps
# end match_ref_hyp()
def parse_arguments(strs):
    parser = argparse.ArgumentParser(description='Given input references in stm format and input hypothesis in ctm format, generate a hypothesis file in stm format. Each hypothesis line is generated by picking any words from the same file/channel whose midpoint intersects the corresponding reference line. Any hypothesis words that do not match a reference line are added at the end of the hypothesis file. Version %s.'%(VERSION))
parser.add_argument('-loglevel',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='WARNING',
help='Logging level (default %(default)s)')
parser.add_argument('-version', '--version', action='version', version=str(VERSION))
parser.add_argument('-ref', help='Input reference file in stm format', type=argparse.FileType('r'), required=True)
parser.add_argument('-inhyp', help='Input hypothesis file in ctm format', type=argparse.FileType('r'), required=True)
parser.add_argument('-outhyp', help='Output hypothesis file in stm format', type=argparse.FileType('w'), required=True)
Global.args = parser.parse_args(strs)
# end parse_arguments()
def setup_logging():
numeric_level = getattr(logging, Global.args.loglevel, None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % Global.args.loglevel)
logging.basicConfig(level=numeric_level, format="%(module)s:%(levelname)s: %(message)s")
# end setup_logging()
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | 9,198,778,341,884,706,000 | 39.550562 | 431 | 0.655583 | false |
molgor/cyberklt | cyberklt/drivers/models.py | 1 | 1851 | # -*- coding: utf-8 -*-
import numpy as np
from django.db import models
import drivers.MPL3115A2 as mpl
class Temperature(models.Model):
    temp = models.FloatField(default=-999.9)  # Celsius
    altitude = models.FloatField(default=np.nan)  # meters
timestamp = models.DateTimeField(auto_now=True)
instrument = models.CharField(max_length=100,default="MPL3115A2")
@classmethod
def getTemp(cls):
"""
        Read the current temperature from the sensor (instance not saved).
"""
data = mpl.readData()
temp = cls(temp=data['Temperature'])
return temp
@classmethod
def create(cls):
"""
        Read temperature and altitude from the sensor and save a record.
"""
data = mpl.readData()
        temp = cls(temp=data['Temperature'], altitude=data['Altitude'])
temp.save()
return temp
def __str__(self):
time_ = self.timestamp.strftime(" |%H:%M %d-%M-%Y|")
cad = "<Temperature reading: %s , %s>"%(self.temp,time_)
return cad
class Pressure(models.Model):
    pressure = models.FloatField(default=-999.9)  # kPa
timestamp = models.DateTimeField(auto_now=True)
instrument = models.CharField(max_length=100,default="MPL3115A2")
@classmethod
def getPressure(cls):
"""
        Read the current pressure from the sensor (instance not saved).
        """
        data = mpl.readData(mode=2)
        pres = cls(pressure=data['Pressure'])
        return pres
@classmethod
def create(cls):
"""
        Read the pressure from the sensor and save a record.
"""
data = mpl.readData(mode=2)
temp = cls(pressure=data['Pressure'])
temp.save()
return temp
def __str__(self):
time_ = self.timestamp.strftime(" |%H:%M %d-%M-%Y|")
cad = "<Pressure reading: %s , %s>"%(self.pressure,time_)
return cad
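# Example usage (assumes an MPL3115A2 wired over I2C and a configured
# Django app; these calls hit the hardware and the database):
#     Temperature.create()  # read the sensor and save a Temperature row
#     Pressure.create()     # read the sensor and save a Pressure row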
| gpl-3.0 | 6,452,715,802,677,283,000 | 27.476923 | 70 | 0.594273 | false |
nicolashainaux/mathmaker | toolbox/build_db.py | 1 | 89443 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <[email protected]>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
This script adds new entries to the database.
It actually erases the database and builds it entirely.
It will add all entries:
- from files mini_pb_addi_direct.yaml, mini_pb_divi_direct.yaml,
mini_pb_subtr_direct.yaml and mini_pb_multi_direct.yaml from data/wordings/,
- from all w3l.po files from locale/*/LC_MESSAGES/
- from all w4l.po files from locale/*/LC_MESSAGES/
- from all w5l.po files from locale/*/LC_MESSAGES/
- from all *_names.po files from locale/*/LC_MESSAGES/
- all single ints from 2 to SINGLEINTS_MAX
- all single decimal numbers with one digit from 0.0 to 100.0
- all integer pairs from 2 to INTPAIRS_MAX
- all integer triples from 2 to INTTRIPLES_MAX (plus quadruples, quintuples
  and sextuples up to their respective maxima)
- a list of "clever" couples of (integer, decimal) (for multiplications)
- a list of angles' ranges (around 0, 90, 180, 270)
- the list of variants identification numbers (from 0 to 23 and 100 to 155,
so far) for order_of_operations questions
- all unit conversions, sorted in categories and levels,
- decimals from 0.001 to 9.999
- digits positions: one table for thousands to thousandths, another for
tenths to thousandths.
- simple fractions: 1/2 to 1/10, 2/3 to 2/10 etc. until 9/10
- dvipsnames_selection for LaTeX package 'xcolor'
- polygons shapes
"""
import os
import sys
import json
import sqlite3
from math import gcd
from decimal import Decimal
from mathmakerlib.calculus import Number
from mathmaker import settings
from mathmaker.lib.tools import po_file_get_list_of, check_unique_letters_words
from mathmaker.lib.tools.frameworks import get_attributes
from mathmaker.lib.tools.distcode import distcode
from mathmaker.lib.tools.database import parse_sql_creation_query
from mathmaker.lib.constants.numeration import DIGITSPLACES
from mathmaker.lib.constants.numeration import DIGITSPLACES_DECIMAL
INTPAIRS_MAX = 1000
INTTRIPLES_MAX = 200
INTQUADRUPLES_MAX = 50
INTQUINTUPLES_MAX = 36
INTSEXTUPLES_MAX = 25
SINGLEINTS_MAX = 1000
NNPAIRS_MAX = 100
NNTRIPLES_MAX = 10
NNQUADRUPLES_MAX = 10
NNQUINTUPLES_MAX = 10
NNSEXTUPLES_MAX = 10
def _suits_for_deci1(i, j):
return not(i % 10 == 0 and j % 10 == 0)
def _suits_for_deci2(i, j):
return not(i % 10 == 0 or j % 10 == 0)
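# These predicates presumably flag integer pairs usable to build decimal
# factors by dividing a member by 10 (e.g. 7 -> 0.7): deci1 requires at
# least one member not to be a multiple of 10, deci2 requires both.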
def __main__():
settings.init()
WORDINGS_DIR = settings.datadir + "wordings/"
WORDINGS_FILES = [WORDINGS_DIR + n + ".yaml"
for n in ["mini_pb_addi_direct",
"mini_pb_divi_direct",
"mini_pb_subtr_direct",
"mini_pb_multi_direct"]]
# Existent db is deleted. A brand new empty db is created.
if os.path.isfile(settings.path.db_dist):
sys.stderr.write('Remove previous database...\n')
os.remove(settings.path.db_dist)
if os.path.isfile(settings.path.shapes_db_dist):
sys.stderr.write('Remove previous shapes database...\n')
os.remove(settings.path.shapes_db_dist)
if os.path.isfile(settings.path.solids_db_dist):
        sys.stderr.write('Remove previous solids database...\n')
os.remove(settings.path.solids_db_dist)
if os.path.isfile(settings.path.anglessets_db_dist):
sys.stderr.write('Remove previous anglessets database...\n')
os.remove(settings.path.anglessets_db_dist)
if os.path.isfile(settings.path.natural_nb_tuples_db_dist):
        sys.stderr.write('Remove previous natural numbers tuples database...\n')
os.remove(settings.path.natural_nb_tuples_db_dist)
sys.stderr.write('Create new databases...\n')
open(settings.path.db_dist, 'a').close()
open(settings.path.shapes_db_dist, 'a').close()
open(settings.path.solids_db_dist, 'a').close()
open(settings.path.anglessets_db_dist, 'a').close()
open(settings.path.natural_nb_tuples_db_dist, 'a').close()
sys.stderr.write('Connect to databases...\n')
db = sqlite3.connect(settings.path.db_dist)
shapes_db = sqlite3.connect(settings.path.shapes_db_dist)
solids_db = sqlite3.connect(settings.path.solids_db_dist)
anglessets_db = sqlite3.connect(settings.path.anglessets_db_dist)
natural_nb_tuples_db = sqlite3.connect(
settings.path.natural_nb_tuples_db_dist)
natural_nb_tuples_db_creation_queries = []
sys.stderr.write('Create tables...\n')
# Creation of the tables
db_creation_queries = ['''CREATE TABLE w{}l
(id INTEGER PRIMARY KEY, language TEXT, word TEXT,
drawDate INTEGER)'''.format(n) for n in settings.available_wNl]
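    # i.e. one words table per word length (w3l, w4l, w5l..., following
    # settings.available_wNl, cf. the module docstring).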
db_creation_queries += \
['''CREATE TABLE angle_decorations
(id INTEGER PRIMARY KEY, variety TEXT, hatchmark TEXT,
drawDate INTEGER)''',
'''CREATE TABLE names
(id INTEGER PRIMARY KEY, language TEXT, gender TEXT, name TEXT,
drawDate INTEGER)''',
'''CREATE TABLE mini_pb_wordings
(id INTEGER PRIMARY KEY, wording_context TEXT, wording TEXT,
nb1_min INTEGER, nb1_max INTEGER,
nb2_min INTEGER, nb2_max INTEGER,
back_to_unit TEXT, q_id TEXT, drawDate INTEGER)''',
'''CREATE TABLE single_ints
(id INTEGER PRIMARY KEY, nb1 INTEGER, drawDate INTEGER)''',
# DECIMAL(4, 1) stands for up to 4 integer digits, up to 1 fractional
# digit but these values may have no effect (purpose is only
# documentation)
'''CREATE TABLE single_deci1
(id INTEGER PRIMARY KEY, nb1 DECIMAL(4, 1), drawDate INTEGER)''',
'''CREATE TABLE angle_ranges
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER,
drawDate INTEGER)''',
'''CREATE TABLE units_conversions
(id INTEGER PRIMARY KEY, unit1 TEXT, unit2 TEXT, direction TEXT,
category TEXT, level INTEGER, dimension INTEGER,
drawDate INTEGER)''',
'''CREATE TABLE int_pairs
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER,
lock_equal_products INTEGER, drawDate INTEGER, clever INTEGER,
suits_for_deci1 INTEGER, suits_for_deci2 INTEGER)''',
'''CREATE TABLE int_triples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
code TEXT, triangle INTEGER, isosceles INTEGER,
equilateral INTEGER, pythagorean INTEGER, equal_sides INTEGER,
drawDate INTEGER)''',
'''CREATE TABLE simple_fractions
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER,
reducible INTEGER, drawDate INTEGER)''',
# As int_deci_clever_pairs may be 'unioned' with int_pairs, its ids
# will be determined starting from the max id of int_pairs, in order
# to have unique ids over the two tables.
'''CREATE TABLE int_deci_clever_pairs
(id INTEGER, nb1 FLOAT, nb2 FLOAT, drawDate INTEGER,
clever INTEGER)''',
'''CREATE TABLE order_of_operations_variants
(id INTEGER PRIMARY KEY, nb1 INTEGER, drawDate INTEGER)''',
# DECIMAL(2, 3) stands for up to 2 integer digits,
# up to 3 fractional digits
# but these values may have no effect (purpose is only documentation)
# nz stands for "Non Zero digits (number)"
# iz stands for "Isolated Zeros (number)"
# fd stands for "Fractional Digits (number)"
'''CREATE TABLE decimals
(id INTEGER PRIMARY KEY, nb1 DECIMAL(2, 3), nz INTEGER,
iz INTEGER, fd INTEGER, overlap_level INTEGER,
pure_half INTEGER, pure_quarter INTEGER, drawDate INTEGER)''',
'''CREATE TABLE digits_places
(id INTEGER PRIMARY KEY, place DECIMAL(4, 3), drawDate INTEGER)''',
'''CREATE TABLE fracdigits_places
(id INTEGER PRIMARY KEY, place DECIMAL(4, 3), drawDate INTEGER)''',
'''CREATE TABLE dvipsnames_selection
(id INTEGER PRIMARY KEY, color_name TEXT, drawDate INTEGER)''',
]
for qr in db_creation_queries:
db.execute(qr)
sys.stderr.write('Insert data from locale/*/LC_MESSAGES/*.pot files...\n')
# Extract data from po(t) files and insert them into the db
for lang in next(os.walk(settings.localedir))[1]:
settings.language = lang
for n in settings.available_wNl:
if os.path.isfile(settings.localedir + lang
+ "/LC_MESSAGES/w{}l.po".format(str(n))):
words = po_file_get_list_of('words', lang, n)
check_unique_letters_words(words, n)
db_rows = list(zip([lang for _ in range(len(words))],
words,
[0 for _ in range(len(words))]))
db.executemany(
"INSERT INTO w{}l(language, word, drawDate) "
"VALUES(?, ?, ?)".format(str(n)),
db_rows)
for gender in ["masculine", "feminine"]:
if os.path.isfile(settings.localedir + lang
+ "/LC_MESSAGES/" + gender + "_names.po"):
# __
names = po_file_get_list_of('names', lang, gender)
db_rows = list(zip([lang for _ in range(len(names))],
[gender for _ in range(len(names))],
names,
[0 for _ in range(len(names))]))
db.executemany("INSERT "
"INTO names(language, gender, name, drawDate) "
"VALUES(?, ?, ?, ?)",
db_rows)
sys.stderr.write('Insert angles\'s decorations...\n')
db_rows = [('single', 'singledash', 0),
('single', 'doubledash', 0),
('single', 'tripledash', 0),
('double', None, 0),
('double', 'singledash', 0),
('triple', None, 0),
('triple', 'singledash', 0),
]
db.executemany("INSERT "
"INTO angle_decorations"
"(variety, hatchmark, drawDate) "
"VALUES(?, ?, ?)",
db_rows)
sys.stderr.write(
'Insert data from data/frameworks/wordings/*.yaml files...\n')
# Extract data from yaml files and insert them into the db
for f in WORDINGS_FILES:
wordings = get_attributes(f, "wording")
db_rows = list(zip([w['wording_context'] for w in wordings],
[w['wording'] for w in wordings],
[w['nb1_min'] for w in wordings],
[w['nb1_max'] for w in wordings],
[w['nb2_min'] for w in wordings],
[w['nb2_max'] for w in wordings],
[w['back_to_unit'] for w in wordings],
[w['q_id'] for w in wordings],
[0 for _ in range(len(wordings))]))
db.executemany("INSERT "
"INTO mini_pb_wordings(wording_context, wording, "
"nb1_min, nb1_max, nb2_min, nb2_max, back_to_unit, "
"q_id, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows)
creation_query = '''CREATE TABLE mini_pb_prop_wordings
(id INTEGER PRIMARY KEY, wid INTEGER,
wording_context TEXT, wording TEXT,
coeff_min INTEGER, coeff_max INTEGER,
nb1_min INTEGER, nb1_max INTEGER,
nb2_min INTEGER, nb2_max INTEGER,
nb3_min INTEGER, nb3_max INTEGER,
solution_min INTEGER, solution_max INTEGER,
nb1_xcoeff INTEGER, nb2_xcoeff INTEGER,
nb3_xcoeff INTEGER,
nb1_may_be_deci INTEGER, nb2_may_be_deci INTEGER,
nb3_may_be_deci INTEGER, solution_may_be_deci INTEGER,
locked INTEGER, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
PROP_WORDINGS_FILE = WORDINGS_DIR + 'mini_pb_proportionality' + '.yaml'
wordings = get_attributes(PROP_WORDINGS_FILE, "wording")
db_rows = list(zip([i + 1 for i in range(len(wordings))],
[w.get('wording_context') for w in wordings],
[w.get('wording') for w in wordings],
[w.get('coeff_min', 0) for w in wordings],
[w.get('coeff_max', 10000) for w in wordings],
[w.get('nb1_min', 0) for w in wordings],
[w.get('nb1_max', 1000) for w in wordings],
[w.get('nb2_min', 0) for w in wordings],
[w.get('nb2_max', 1000) for w in wordings],
[w.get('nb3_min', 0) for w in wordings],
[w.get('nb3_max', 10000) for w in wordings],
[w.get('solution_min', 0) for w in wordings],
[w.get('solution_max', 10000) for w in wordings],
[w.get('nb1_xcoeff', 1) for w in wordings],
[w.get('nb2_xcoeff', 1) for w in wordings],
[w.get('nb3_xcoeff', 1) for w in wordings],
[w.get('nb1_may_be_deci', 0) for w in wordings],
[w.get('nb2_may_be_deci', 0) for w in wordings],
[w.get('nb3_may_be_deci', 0) for w in wordings],
[w.get('solution_may_be_deci', 0) for w in wordings],
[0 for _ in range(len(wordings))],
[0 for _ in range(len(wordings))]))
db.executemany("INSERT "
"INTO mini_pb_prop_wordings(wid, wording_context, wording, "
"coeff_min, coeff_max, nb1_min, nb1_max, nb2_min, nb2_max, "
"nb3_min, nb3_max, solution_min, solution_max, "
"nb1_xcoeff, nb2_xcoeff, nb3_xcoeff, "
"nb1_may_be_deci, nb2_may_be_deci, "
"nb3_may_be_deci, solution_may_be_deci, "
"locked, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
"?, ?, ?, ?, ?)",
db_rows)
creation_query = '''CREATE TABLE mini_pb_time_wordings
(id INTEGER PRIMARY KEY, wid INTEGER,
wording_context TEXT, type TEXT, wording TEXT,
mini_start_hour INTEGER, mini_start_minute INTEGER,
maxi_start_hour INTEGER, maxi_start_minute INTEGER,
mini_duration_hour INTEGER,
mini_duration_minute INTEGER,
maxi_duration_hour INTEGER,
maxi_duration_minute INTEGER,
mini_end_hour INTEGER, mini_end_minute INTEGER,
maxi_end_hour INTEGER, maxi_end_minute INTEGER,
locked, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
TIME_WORDINGS_FILE = WORDINGS_DIR + 'mini_pb_time' + '.yaml'
wordings = get_attributes(TIME_WORDINGS_FILE, 'wording')
db_rows = list(zip([i + 1 for i in range(len(wordings))],
[w.get('wording_context') for w in wordings],
[w.get('type') for w in wordings],
[w.get('wording') for w in wordings],
[w.get('mini_start_hour') for w in wordings],
[w.get('mini_start_minute') for w in wordings],
[w.get('maxi_start_hour') for w in wordings],
[w.get('maxi_start_minute') for w in wordings],
[w.get('mini_duration_hour') for w in wordings],
[w.get('mini_duration_minute') for w in wordings],
[w.get('maxi_duration_hour') for w in wordings],
[w.get('maxi_duration_minute') for w in wordings],
[w.get('mini_end_hour') for w in wordings],
[w.get('mini_end_minute') for w in wordings],
[w.get('maxi_end_hour') for w in wordings],
[w.get('maxi_end_minute') for w in wordings],
[0 for _ in range(len(wordings))],
[0 for _ in range(len(wordings))]))
db.executemany("INSERT "
"INTO mini_pb_time_wordings(wid, wording_context, type, "
"wording, "
"mini_start_hour, mini_start_minute, maxi_start_hour, "
"maxi_start_minute, mini_duration_hour,"
"mini_duration_minute, maxi_duration_hour, "
"maxi_duration_minute, mini_end_hour, mini_end_minute, "
"maxi_end_hour, maxi_end_minute, locked, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, "
"?, ?)",
db_rows)
creation_query = '''CREATE TABLE divisibility_statements
(id INTEGER PRIMARY KEY, wid INTEGER,
wording TEXT, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
wordings = get_attributes(WORDINGS_DIR + 'divisibility_statements.yaml',
'wording')
db_rows = list(zip([i + 1 for i in range(len(wordings))],
[w.get('wording') for w in wordings],
[0 for _ in range(len(wordings))]))
db.executemany("INSERT "
"INTO divisibility_statements"
"(wid, wording, drawDate) "
"VALUES(?, ?, ?)",
db_rows)
creation_query = '''CREATE TABLE distcodes
(id INTEGER PRIMARY KEY, nbof_nb INTEGER,
distcode TEXT, equilateral INTEGER,
equal_sides INTEGER, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
db_rows = [(2, '2', 1, 1, 0),
(2, '1_1', 0, 0, 0),
(3, '3', 1, 1, 0),
(3, '2_1', 0, 1, 0),
(3, '1_1_1', 0, 0, 0),
(4, '4', 1, 1, 0),
(4, '3_1', 0, 1, 0),
(4, '2_2', 0, 1, 0),
(4, '2_1_1', 0, 1, 0),
(4, '1_1_1_1', 0, 0, 0),
(5, '5', 1, 1, 0),
(5, '4_1', 0, 1, 0),
(5, '3_2', 0, 1, 0),
(5, '3_1_1', 0, 1, 0),
(5, '2_2_1', 0, 1, 0),
(5, '2_1_1_1', 0, 1, 0),
(5, '1_1_1_1_1', 0, 0, 0),
(6, '6', 1, 1, 0),
(6, '5_1', 0, 1, 0),
(6, '4_2', 0, 1, 0),
(6, '4_1_1', 0, 1, 0),
(6, '3_3', 0, 1, 0),
(6, '3_2_1', 0, 1, 0),
(6, '3_1_1_1', 0, 1, 0),
(6, '2_2_2', 0, 1, 0),
(6, '2_2_1_1', 0, 1, 0),
(6, '2_1_1_1_1_1', 0, 1, 0),
(6, '1_1_1_1_1_1', 0, 0, 0)]
db.executemany("INSERT "
"INTO distcodes"
"(nbof_nb, distcode, equilateral, equal_sides, drawDate) "
"VALUES(?, ?, ?, ? , ?)",
db_rows)
creation_query = '''CREATE TABLE directions
(id INTEGER PRIMARY KEY, direction TEXT,
drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
db_rows = [('top-right', 0),
('top-left', 0),
('bottom-left', 0),
('bottom-right', 0)]
db.executemany("INSERT "
"INTO directions"
"(direction, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE times
(id INTEGER PRIMARY KEY, hour INTEGER, minute INTEGER,
drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
db_rows = [(hour, minute, 0) for hour in range(24) for minute in range(60)]
db.executemany("INSERT "
"INTO times"
"(hour, minute, drawDate) "
"VALUES(?, ?, ?)",
db_rows)
sys.stderr.write('Insert mixed decimals and ints triples for '
'proportionality...\n')
integers = [_ for _ in range(2, 32)]
integers.append(50)
integers.append(60)
integers.append(80)
integers.append(100)
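    # Each row below is (coeff, nb1, nb2, nb3, solution, locked, drawDate)
    # with nb3 = coeff * nb1 and solution = coeff * nb2 (rounded to 0.01 for
    # the non-terminating coefficients like 0.666667 and 1.333333).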
db_rows = [(0.666667, n1, n2,
float((Number('0.666667') * n1).rounded(Number('0.01'))),
float((Number('0.666667') * n2).rounded(Number('0.01'))), 0, 0)
for n1 in integers if n1 % 3 == 0
for n2 in integers
if n2 != n1 and n2 % n1 != 0 and n2 > n1 / 2 and n2 % 3 == 0]
db_rows += [(0.75, n1, n2, float(Number('0.75') * n1),
float(Number('0.75') * n2), 0, 0)
for n1 in integers if n1 % 4 == 0
for n2 in integers
if n2 != n1 and n2 % n1 and n2 > n1 / 2 and n2 % 4 == 0]
db_rows += [(1.125, n1, n2, float(Number('1.125') * n1),
float(Number('1.125') * n2), 0, 0)
for n1 in integers if n1 % 8 == 0 and n1 > 8
for n2 in integers if n2 != n1 and n2 % n1 and n2 > n1 / 2
and n2 % 8 != 0 and n2 % 4 == 0]
db_rows += [(1.2, n1, n2, float(Number('1.2') * n1),
float(Number('1.2') * n2), 0, 0)
for n1 in integers if n1 % 5 == 0
for n2 in integers
if n2 != n1 and n2 % n1 != 0 and n2 > n1 / 2 and n2 % 5 == 0]
db_rows += [(1.25, n1, n2, float(Number('1.25') * n1),
float(Number('1.25') * n2), 0, 0)
for n1 in integers if n1 % 4 == 0
for n2 in integers if n2 != n1 and n2 > n1 / 2 and n2 % 4 != 0
and n2 % 2 == 0 and n2 % n1]
db_rows += [(1.25, n1, n2, float(Number('1.25') * n1),
float(Number('1.25') * n2), 0, 0)
for n1 in integers if n1 % 4 == 0
for n2 in integers if n2 != n1 and n2 > n1 / 2 and n2 % 4 == 0
and n2 >= 41 and n2 % n1]
db_rows += [(1.333333, n1, n2,
float((Number('1.333333') * n1).rounded(Number('0.01'))),
float((Number('1.333333') * n2).rounded(Number('0.01'))), 0, 0)
for n1 in integers if n1 % 3 == 0
for n2 in integers
if n2 != n1 and n2 % n1 != 0 and n2 > n1 / 2 and n2 % 3 == 0]
db_rows += [(1.5, n1, n2, float(Number('1.5') * n1),
float(Number('1.5') * n2), 0, 0)
for n1 in integers
if n1 < 7 or (8 <= n1 <= 24 and n1 % 2 == 0)
or (n1 >= 30 and n1 % 10 == 0)
for n2 in integers
if n2 != n1 and n2 % n1 and n2 > n1 / 2]
db_rows += [(c, 1.5, n2, float(c * Number('1.5')), float(c * n2), 0, 0)
for c in [2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 30, 40,
50, 60, 80, 100]
for n2 in integers
if n2 != c and n2 > c / 2]
db_rows += [(2.5, n1, n2, float(Number('2.5') * n1),
float(Number('2.5') * n2), 0, 0)
for n1 in integers if n1 <= 10 or (n1 > 10 and n1 % 10 == 0)
if not ((n1 >= 12 and n1 % 10 != 0) or n1 in [7, 9])
for n2 in integers
if n2 != n1 and n2 % n1 and n2 > n1 / 2 and n2 % 2 != 0]
db_rows += [(c, 2.5, n2, float(c * Number('2.5')), float(c * n2), 0, 0)
for c in [2, 3, 4, 5, 6, 8, 10, 20, 30, 40, 50, 60, 80, 100]
for n2 in integers
if n2 != c and n2 > c / 2]
creation_query = '''CREATE TABLE deci_int_triples_for_prop
(id INTEGER PRIMARY KEY, coeff DECIMAL(1, 6),
nb1 DECIMAL(1, 6), nb2 DECIMAL(1, 6),
nb3 DECIMAL(1, 6), solution DECIMAL(1, 6),
locked INTEGER, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
db.executemany("INSERT "
"INTO deci_int_triples_for_prop(coeff, nb1, nb2, "
"nb3, solution, locked, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?)",
db_rows)
sys.stderr.write('Insert integers pairs...')
# Tables of 1, 2, 3... INTPAIRS_MAX
db_rows = [(i + 1, j + 1, 0, 0, 0,
_suits_for_deci1(i + 1, j + 1),
_suits_for_deci2(i + 1, j + 1))
for i in range(INTPAIRS_MAX)
for j in range(INTPAIRS_MAX)
if j >= i]
for i in range(100):
sys.stderr.write('\rInsert integers pairs... {} %'.format(i))
db.executemany("INSERT "
"INTO int_pairs(nb1, nb2, lock_equal_products, "
"drawDate, clever, suits_for_deci1, suits_for_deci2) "
"VALUES(?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert integers pairs... 100 %\n')
sys.stderr.write('Create integers triples...\n')
# Tables of 1, 2, 3... INTTRIPLES_MAX
db_rows = [(15, 2, 3, 'none', 0, 0, 0, 0, 0, 0),
(15, 2, 5, 'none', 0, 0, 0, 0, 0, 0),
(15, 2, 6, 'none', 0, 0, 0, 0, 0, 0),
(15, 3, 4, 'none', 0, 0, 0, 0, 0, 0),
(15, 3, 5, 'none', 0, 0, 0, 0, 0, 0),
(15, 4, 5, 'none', 0, 0, 0, 0, 0, 0),
(15, 4, 6, 'none', 0, 0, 0, 0, 0, 0),
(15, 5, 6, 'none', 0, 0, 0, 0, 0, 0),
(25, 2, 3, 'none', 0, 0, 0, 0, 0, 0),
(25, 2, 5, 'none', 0, 0, 0, 0, 0, 0),
(25, 2, 6, 'none', 0, 0, 0, 0, 0, 0),
(25, 3, 4, 'none', 0, 0, 0, 0, 0, 0),
(25, 3, 5, 'none', 0, 0, 0, 0, 0, 0),
(25, 4, 5, 'none', 0, 0, 0, 0, 0, 0),
(25, 4, 6, 'none', 0, 0, 0, 0, 0, 0),
(25, 5, 6, 'none', 0, 0, 0, 0, 0, 0)]
db_rows += [(i + 1, j + 1, k + 1, # nb1, nb2, nb3
distcode(i + 1, j + 1, k + 1), # code
k + 1 < i + j + 2, # triangle?
(i == j and j != k) or (i == k and i != j)
or (j == k and i != j), # isosceles? (but not equilateral)
i == j == k, # equilateral?
(k + 1) ** 2 == (i + 1) ** 2 + (j + 1) ** 2, # pythagorean?
(i == j or j == k or k == i), # at least 2 equal sides?
0 # drawDate
)
for i in range(INTTRIPLES_MAX)
for j in range(INTTRIPLES_MAX)
for k in range(INTTRIPLES_MAX)
if k >= j >= i and k - i <= 30]
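    # (The k - i <= 30 cap above bounds the spread between the smallest and
    # largest values, which keeps the table size manageable.)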
sys.stderr.write('Insert integers triples...')
for i in range(100):
sys.stderr.write('\rInsert integers triples... {} %'.format(i))
db.executemany("INSERT "
"INTO int_triples(nb1, nb2, nb3, code, triangle, "
"isosceles, equilateral, pythagorean, equal_sides, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert integers triples... 100 %\n')
creation_query = '''CREATE TABLE int_quadruples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
nb4 INTEGER, code TEXT, quadrilateral INTEGER, equilateral INTEGER,
equal_sides INTEGER, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
sys.stderr.write('Create integers quadruples...\n')
# Tables of 1, 2, 3... INTQUADRUPLES_MAX
db_rows = [(i + 1, j + 1, k + 1, n + 1, # nb1, nb2, nb3, nb4
distcode(i + 1, j + 1, k + 1, n + 1), # code
n + 1 < i + j + k + 3, # quadrilateral?
i == j == k == n, # equilateral?
(i == j or j == k or k == i or i == n or j == n or k == n),
# at least 2 equal sides?
0 # drawDate
)
for i in range(INTQUADRUPLES_MAX)
for j in range(INTQUADRUPLES_MAX)
for k in range(INTQUADRUPLES_MAX)
for n in range(INTQUADRUPLES_MAX)
if n >= k >= j >= i and n - i <= 18]
sys.stderr.write('Insert integers quadruples...')
for i in range(100):
sys.stderr.write('\rInsert integers quadruples... {} %'.format(i))
db.executemany("INSERT "
"INTO int_quadruples(nb1, nb2, nb3, nb4, code, "
"quadrilateral, equilateral, equal_sides, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert integers quadruples... 100 %\n')
creation_query = '''CREATE TABLE int_quintuples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
nb4 INTEGER, nb5 INTEGER, code TEXT, pentagon INTEGER,
equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
sys.stderr.write('Create integers quintuples...\n')
# Tables of 1, 2, 3... INTQUINTUPLES_MAX
db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, # nb1, nb2, nb3, nb4, nb5
distcode(i + 1, j + 1, k + 1, n + 1, p + 1), # code
p + 1 < i + j + k + n + 4, # pentagon?
i == j == k == n == p, # equilateral?
(i == j or j == k or k == i or i == n or j == n or k == n
or i == p or j == p or k == p or n == p),
# at least 2 equal sides?
0 # drawDate
)
for i in range(INTQUINTUPLES_MAX)
for j in range(INTQUINTUPLES_MAX)
for k in range(INTQUINTUPLES_MAX)
for n in range(INTQUINTUPLES_MAX)
for p in range(INTQUINTUPLES_MAX)
if p >= n >= k >= j >= i and p - i <= 16]
sys.stderr.write('Insert integers quintuples...')
for i in range(100):
sys.stderr.write('\rInsert integers quintuples... {} %'.format(i))
db.executemany("INSERT "
"INTO int_quintuples(nb1, nb2, nb3, nb4, nb5, code, "
"pentagon, equilateral, equal_sides, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert integers quintuples... 100 %\n')
creation_query = '''CREATE TABLE int_sextuples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
nb4 INTEGER, nb5 INTEGER, nb6 INTEGER, code TEXT, hexagon INTEGER,
equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
sys.stderr.write('Create integers sextuples...\n')
# Tables of 1, 2, 3... INTSEXTUPLES_MAX
db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1,
# nb1, nb2, nb3, nb4, nb5, nb6
distcode(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1), # code
q + 1 < i + j + k + n + p + 5, # hexagon?
i == j == k == n == p == q, # equilateral?
(i == j or j == k or k == i or i == n or j == n or k == n
or i == p or j == p or k == p or n == p or i == q or j == q
or k == q or n == q or p == q),
# at least 2 equal sides?
0 # drawDate
)
for i in range(INTSEXTUPLES_MAX)
for j in range(INTSEXTUPLES_MAX)
for k in range(INTSEXTUPLES_MAX)
for n in range(INTSEXTUPLES_MAX)
for p in range(INTSEXTUPLES_MAX)
for q in range(INTSEXTUPLES_MAX)
if q >= p >= n >= k >= j >= i and q - i <= 16]
sys.stderr.write('Insert integers sextuples...')
for i in range(100):
sys.stderr.write('\rInsert integers sextuples... {} %'.format(i))
db.executemany("INSERT "
"INTO int_sextuples(nb1, nb2, nb3, nb4, nb5, nb6, "
"code, hexagon, equilateral, equal_sides, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert integers sextuples... 100 %\n')
# sys.stderr.flush()
sys.stderr.write('Create natural numbers pairs...\n')
creation_query = '''CREATE TABLE pairs
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, code TEXT,
lock_equal_products INTEGER, drawDate INTEGER, clever INTEGER,
suits_for_deci1 INTEGER, suits_for_deci2 INTEGER)'''
natural_nb_tuples_db_creation_queries.append(creation_query)
natural_nb_tuples_db.execute(creation_query)
# Tables of 1, 2, 3... NNPAIRS_MAX
db_rows = [(i + 1, j + 1, distcode(i + 1, j + 1), 0, 0, 0,
_suits_for_deci1(i + 1, j + 1),
_suits_for_deci2(i + 1, j + 1))
for i in range(NNPAIRS_MAX)
for j in range(NNPAIRS_MAX)
if j >= i]
for i in range(100):
sys.stderr.write('\rInsert natural numbers pairs... {} %'.format(i))
natural_nb_tuples_db.executemany(
"INSERT "
"INTO pairs(nb1, nb2, code, lock_equal_products, "
"drawDate, clever, suits_for_deci1, suits_for_deci2) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert natural numbers pairs... 100 %\n')
sys.stderr.write('Setup natural numbers pairs: clever (5)...\n')
for couple in [(2, 5), (2, 50), (2, 500), (5, 20), (5, 200)]:
        natural_nb_tuples_db.execute(
            "UPDATE pairs SET clever = 5 WHERE nb1 = ? and nb2 = ?;",
            couple)
sys.stderr.write('Setup natural numbers pairs: clever (4)...\n')
for couple in [(4, 25), (4, 250)]:
        natural_nb_tuples_db.execute(
            "UPDATE pairs SET clever = 4 WHERE nb1 = ? and nb2 = ?;",
            couple)
sys.stderr.write('Create natural number×decimal "clever" pairs...\n')
creation_query = '''CREATE TABLE nn_deci_clever_pairs
(id INTEGER, nb1 FLOAT, nb2 FLOAT, drawDate INTEGER,
clever INTEGER)'''
natural_nb_tuples_db_creation_queries.append(creation_query)
natural_nb_tuples_db.execute(creation_query)
sys.stderr.write('Insert natural number×decimal "clever" pairs...\n')
# Insert natural number/decimal "clever" pairs into the db
# The tenths series (only one yet) is identified by a 10
# the quarters series by a 4
# the halfs/fifths series by a 5
start_id = tuple(natural_nb_tuples_db.execute(
"SELECT MAX(id) FROM pairs "))[0][0] + 1
db_rows = list(zip([i + start_id for i in range(5)],
[0.2, 2, 4, 4, 0.1],
[5, 0.5, 0.25, 2.5, 10],
[0, 0, 0, 0, 0],
[5, 5, 4, 4, 10]))
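    # Resulting rows: (0.2, 5) and (2, 0.5) -> clever 5 (halves/fifths),
    # (4, 0.25) and (4, 2.5) -> clever 4 (quarters), (0.1, 10) -> clever 10
    # (tenths).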
natural_nb_tuples_db.executemany(
"INSERT INTO nn_deci_clever_pairs(id, nb1, nb2, drawDate, clever) "
"VALUES(?, ?, ?, ?, ?)", db_rows)
sys.stderr.write('Create natural numbers triples...\n')
creation_query = '''CREATE TABLE triples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
code TEXT, constructible INTEGER, isosceles INTEGER,
equilateral INTEGER, pythagorean INTEGER, equal_sides INTEGER,
drawDate INTEGER)'''
natural_nb_tuples_db_creation_queries.append(creation_query)
natural_nb_tuples_db.execute(creation_query)
# Tables of 1, 2, 3... NNTRIPLES_MAX
db_rows = [(15, 2, 3, 'none', 0, 0, 0, 0, 0, 0),
(15, 2, 5, 'none', 0, 0, 0, 0, 0, 0),
(15, 2, 6, 'none', 0, 0, 0, 0, 0, 0),
(15, 3, 4, 'none', 0, 0, 0, 0, 0, 0),
(15, 3, 5, 'none', 0, 0, 0, 0, 0, 0),
(15, 4, 5, 'none', 0, 0, 0, 0, 0, 0),
(15, 4, 6, 'none', 0, 0, 0, 0, 0, 0),
(15, 5, 6, 'none', 0, 0, 0, 0, 0, 0),
(25, 2, 3, 'none', 0, 0, 0, 0, 0, 0),
(25, 2, 5, 'none', 0, 0, 0, 0, 0, 0),
(25, 2, 6, 'none', 0, 0, 0, 0, 0, 0),
(25, 3, 4, 'none', 0, 0, 0, 0, 0, 0),
(25, 3, 5, 'none', 0, 0, 0, 0, 0, 0),
(25, 4, 5, 'none', 0, 0, 0, 0, 0, 0),
(25, 4, 6, 'none', 0, 0, 0, 0, 0, 0),
(25, 5, 6, 'none', 0, 0, 0, 0, 0, 0)]
db_rows += [(i + 1, j + 1, k + 1, # nb1, nb2, nb3
distcode(i + 1, j + 1, k + 1), # code
k + 1 < i + j + 2, # constructible triangle?
(i == j and j != k) or (i == k and i != j)
or (j == k and i != j), # isosceles? (but not equilateral)
i == j == k, # equilateral?
(k + 1) ** 2 == (i + 1) ** 2 + (j + 1) ** 2, # pythagorean?
(i == j or j == k or k == i), # at least 2 equal sides?
0 # drawDate
)
for i in range(NNTRIPLES_MAX)
for j in range(NNTRIPLES_MAX)
for k in range(NNTRIPLES_MAX)
if k >= j >= i]
sys.stderr.write('Insert natural numbers triples...')
for i in range(100):
sys.stderr.write('\rInsert natural numbers triples... {} %'
.format(i))
natural_nb_tuples_db\
.executemany("INSERT "
"INTO triples(nb1, nb2, nb3, code, "
"constructible, isosceles, equilateral, pythagorean,"
"equal_sides, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert natural numbers triples... 100 %\n')
# sys.stderr.flush()
sys.stderr.write('Create natural numbers quadruples...\n')
creation_query = '''CREATE TABLE quadruples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
nb4 INTEGER, code TEXT, constructible INTEGER,
equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)'''
natural_nb_tuples_db_creation_queries.append(creation_query)
natural_nb_tuples_db.execute(creation_query)
# Tables of 1, 2, 3... NNQUADRUPLES_MAX
db_rows = [(i + 1, j + 1, k + 1, n + 1, # nb1, nb2, nb3, nb4
distcode(i + 1, j + 1, k + 1, n + 1), # code
n + 1 < i + j + k + 3, # constructible quadrilateral?
i == j == k == n, # equilateral?
(i == j or j == k or k == i or i == n or j == n or k == n),
# at least 2 equal sides?
0 # drawDate
)
for i in range(NNQUADRUPLES_MAX)
for j in range(NNQUADRUPLES_MAX)
for k in range(NNQUADRUPLES_MAX)
for n in range(NNQUADRUPLES_MAX)
if n >= k >= j >= i]
sys.stderr.write('Insert natural numbers quadruples...')
for i in range(100):
sys.stderr.write('\rInsert natural numbers quadruples... {} %'
.format(i))
natural_nb_tuples_db\
.executemany("INSERT "
"INTO quadruples(nb1, nb2, nb3, nb4, code, "
"constructible, equilateral, equal_sides, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert natural numbers quadruples... 100 %\n')
# sys.stderr.flush()
sys.stderr.write('Create natural numbers quintuples...\n')
creation_query = '''CREATE TABLE quintuples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
nb4 INTEGER, nb5 INTEGER, code TEXT, constructible INTEGER,
equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)'''
natural_nb_tuples_db_creation_queries.append(creation_query)
natural_nb_tuples_db.execute(creation_query)
# Tables of 1, 2, 3... NNQUINTUPLES_MAX
db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, # nb1, nb2, nb3, nb4, nb5
distcode(i + 1, j + 1, k + 1, n + 1, p + 1), # code
p + 1 < i + j + k + n + 4, # constructible?
i == j == k == n == p, # equilateral?
(i == j or j == k or k == i or i == n or j == n or k == n
or i == p or j == p or k == p or n == p),
# at least 2 equal sides?
0 # drawDate
)
for i in range(NNQUINTUPLES_MAX)
for j in range(NNQUINTUPLES_MAX)
for k in range(NNQUINTUPLES_MAX)
for n in range(NNQUINTUPLES_MAX)
for p in range(NNQUINTUPLES_MAX)
if p >= n >= k >= j >= i]
sys.stderr.write('Insert natural numbers quintuples...')
for i in range(100):
sys.stderr.write('\rInsert natural numbers quintuples... {} %'
.format(i))
natural_nb_tuples_db\
.executemany("INSERT "
"INTO quintuples(nb1, nb2, nb3, nb4, nb5, code, "
"constructible, equilateral, equal_sides, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert natural numbers quintuples... 100 %\n')
# sys.stderr.flush()
sys.stderr.write('Create natural numbers sextuples...\n')
creation_query = '''CREATE TABLE sextuples
(id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
nb4 INTEGER, nb5 INTEGER, nb6 INTEGER, code TEXT,
constructible INTEGER, equilateral INTEGER, equal_sides INTEGER,
drawDate INTEGER)'''
natural_nb_tuples_db_creation_queries.append(creation_query)
natural_nb_tuples_db.execute(creation_query)
# Tables of 1, 2, 3... NNSEXTUPLES_MAX
db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1,
# nb1, nb2, nb3, nb4, nb5, nb6
distcode(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1), # code
q + 1 < i + j + k + n + p + 5, # constructible hexagon?
i == j == k == n == p == q, # equilateral?
(i == j or j == k or k == i or i == n or j == n or k == n
or i == p or j == p or k == p or n == p or i == q or j == q
or k == q or n == q or p == q),
# at least 2 equal sides?
0 # drawDate
)
for i in range(NNSEXTUPLES_MAX)
for j in range(NNSEXTUPLES_MAX)
for k in range(NNSEXTUPLES_MAX)
for n in range(NNSEXTUPLES_MAX)
for p in range(NNSEXTUPLES_MAX)
for q in range(NNSEXTUPLES_MAX)
if q >= p >= n >= k >= j >= i]
sys.stderr.write('Insert natural numbers sextuples...')
for i in range(100):
sys.stderr.write('\rInsert natural numbers sextuples... {} %'
.format(i))
natural_nb_tuples_db\
.executemany("INSERT "
"INTO sextuples(nb1, nb2, nb3, nb4, nb5, nb6, code, "
"constructible, equilateral, equal_sides, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows[i * len(db_rows) // 100:
(i + 1) * len(db_rows) // 100])
sys.stderr.write('\rInsert natural numbers sextuples... 100 %\n')
# sys.stderr.flush()
sys.stderr.write('Setup integers pairs: clever (5)...\n')
for couple in [(2, 5), (2, 50), (2, 500), (5, 20), (5, 200)]:
db.execute("UPDATE int_pairs SET clever = 5"
+ " WHERE nb1 = '" + str(couple[0])
+ "' and nb2 = '" + str(couple[1]) + "';")
sys.stderr.write('Setup integers pairs: clever (4)...\n')
for couple in [(4, 25), (4, 250)]:
db.execute("UPDATE int_pairs SET clever = 4"
+ " WHERE nb1 = '" + str(couple[0])
+ "' and nb2 = '" + str(couple[1]) + "';")
sys.stderr.write('Insert integer×decimal "clever" pairs...\n')
# Insert integer/decimal "clever" pairs into the db
# The tenths series (only one yet) is identified by a 10
# the quarters series by a 4
# the halfs/fifths series by a 5
start_id = tuple(db.execute("SELECT MAX(id) FROM int_pairs "))[0][0] + 1
db_rows = list(zip([i + start_id for i in range(5)],
[0.2, 2, 4, 4, 0.1],
[5, 0.5, 0.25, 2.5, 10],
[0, 0, 0, 0, 0],
[5, 5, 4, 4, 10]))
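    # (Same five clever pairs as in the natural numbers db above.)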
db.executemany("INSERT "
"INTO int_deci_clever_pairs(id, nb1, nb2, drawDate, "
"clever) "
"VALUES(?, ?, ?, ?, ?)",
db_rows)
sys.stderr.write('Insert single integers...\n')
# Single ints
db_rows = [(i + 1, 0) for i in range(SINGLEINTS_MAX)]
db.executemany("INSERT "
"INTO single_ints(nb1, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('Insert simple fractions...\n')
db_rows = [(i + 1, j + 1, 0 if gcd(i + 1, j + 1) == 1 else 1, 0)
for i in range(10)
for j in range(10)
if j > i]
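    # Proper fractions nb1/nb2 with 1 <= nb1 < nb2 <= 10; reducible is 1
    # when gcd(nb1, nb2) > 1.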
db.executemany("INSERT "
"INTO simple_fractions(nb1, nb2, reducible, drawDate) "
"VALUES(?, ?, ?, ?)",
db_rows)
sys.stderr.write('Insert single decimals from 0.0 to 100.0...\n')
# Single decimal numbers
db_rows = [(i / 10, 0) for i in range(1001)]
db.executemany("INSERT "
"INTO single_deci1(nb1, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('Generate single decimals from 0.001 to 10.000...')
# Single decimal numbers
db_rows = []
    for j in range(100):
        sys.stderr.write(
            '\rGenerate single decimals from 0.001 to 10.000... {} %'
            .format(j))
        for i in range(100):
            # build the Number once instead of six times per row
            n = Number(Decimal(100 * j + i + 1) / Decimal(1000))
            db_rows.append(((100 * j + i + 1) / 1000,
                            n.nonzero_digits_nb(),
                            n.isolated_zeros(),
                            n.fracdigits_nb(),
                            n.overlap_level(),
                            n.is_pure_half(),
                            n.is_pure_quarter(),
                            0))
sys.stderr.write('\rGenerate single decimals from 0.001 to 10.000...'
' 100 %\n')
sys.stderr.write('Insert single decimals from 0.001 to 10.000...\n')
db.executemany("INSERT "
"INTO decimals(nb1, nz, iz, fd, overlap_level, "
"pure_half, pure_quarter, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
db_rows)
sys.stderr.write('Insert angle ranges...\n')
# Angle ranges
db_rows = [(i - 20, i + 20, 0) for i in [0, 90, 180, 270]]
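    # i.e. four 40-degree-wide windows centred on 0, 90, 180 and 270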
db.executemany("INSERT "
"INTO angle_ranges(nb1, nb2, drawDate) "
"VALUES(?, ?, ?)",
db_rows)
sys.stderr.write('Insert variants of order_of_operations...\n')
# Variant numbers for order_of_operations questions.
db_rows = [(i, 0) for i in range(24)]
db.executemany("INSERT "
"INTO order_of_operations_variants"
"(nb1, drawDate) "
"VALUES(?, ?)",
db_rows)
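    # Second family of variants, numbered from 100 to 187.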
db_rows = [(i + 100, 0) for i in range(88)]
db.executemany("INSERT "
"INTO order_of_operations_variants"
"(nb1, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('Insert unit conversions...\n')
db_rows = [('km', 'hm', 'right', 'length', 1, 1, 0), # simple units,
('hm', 'dam', 'right', 'length', 1, 1, 0), # one column or
('dam', 'm', 'right', 'length', 1, 1, 0), # "classic"
('m', 'dm', 'right', 'length', 1, 1, 0), # conversions
('dm', 'cm', 'right', 'length', 1, 1, 0),
('cm', 'mm', 'right', 'length', 1, 1, 0),
('km', 'm', 'right', 'length', 1, 1, 0),
('m', 'cm', 'right', 'length', 1, 1, 0),
('m', 'mm', 'right', 'length', 1, 1, 0),
('hL', 'daL', 'right', 'capacity', 1, 1, 0),
('daL', 'L', 'right', 'capacity', 1, 1, 0),
('L', 'dL', 'right', 'capacity', 1, 1, 0),
('dL', 'cL', 'right', 'capacity', 1, 1, 0),
('cL', 'mL', 'right', 'capacity', 1, 1, 0),
('hL', 'L', 'right', 'capacity', 1, 1, 0),
('kg', 'hg', 'right', 'mass', 1, 1, 0),
('hg', 'dag', 'right', 'mass', 1, 1, 0),
('dag', 'g', 'right', 'mass', 1, 1, 0),
('g', 'dg', 'right', 'mass', 1, 1, 0),
('dg', 'cg', 'right', 'mass', 1, 1, 0),
('cg', 'mg', 'right', 'mass', 1, 1, 0),
('kg', 'g', 'right', 'mass', 1, 1, 0),
('hm', 'km', 'left', 'length', 1, 1, 0),
('dam', 'hm', 'left', 'length', 1, 1, 0),
('m', 'dam', 'left', 'length', 1, 1, 0),
('dm', 'm', 'left', 'length', 1, 1, 0),
('cm', 'dm', 'left', 'length', 1, 1, 0),
('mm', 'cm', 'left', 'length', 1, 1, 0),
('m', 'km', 'left', 'length', 1, 1, 0),
('cm', 'm', 'left', 'length', 1, 1, 0),
('daL', 'hL', 'left', 'capacity', 1, 1, 0),
('L', 'daL', 'left', 'capacity', 1, 1, 0),
('dL', 'L', 'left', 'capacity', 1, 1, 0),
('cL', 'dL', 'left', 'capacity', 1, 1, 0),
('mL', 'cL', 'left', 'capacity', 1, 1, 0),
('L', 'hL', 'left', 'capacity', 1, 1, 0),
('hg', 'kg', 'left', 'mass', 1, 1, 0),
('dag', 'hg', 'left', 'mass', 1, 1, 0),
('g', 'dag', 'left', 'mass', 1, 1, 0),
('dg', 'g', 'left', 'mass', 1, 1, 0),
('cg', 'dg', 'left', 'mass', 1, 1, 0),
('mg', 'cg', 'left', 'mass', 1, 1, 0),
('g', 'kg', 'left', 'mass', 1, 1, 0),
('km', 'dam', 'right', 'length', 2, 1, 0), # two columns
('hm', 'm', 'right', 'length', 2, 1, 0),
('dam', 'dm', 'right', 'length', 2, 1, 0),
('dm', 'mm', 'right', 'length', 2, 1, 0),
('daL', 'dL', 'right', 'capacity', 2, 1, 0),
('L', 'cL', 'right', 'capacity', 2, 1, 0),
('dL', 'mL', 'right', 'capacity', 2, 1, 0),
('kg', 'dag', 'right', 'mass', 2, 1, 0),
('hg', 'g', 'right', 'mass', 2, 1, 0),
('dag', 'dg', 'right', 'mass', 2, 1, 0),
('g', 'cg', 'right', 'mass', 2, 1, 0),
('dg', 'mg', 'right', 'mass', 2, 1, 0),
('dam', 'km', 'left', 'length', 2, 1, 0),
('m', 'hm', 'left', 'length', 2, 1, 0),
('dm', 'dam', 'left', 'length', 2, 1, 0),
('mm', 'dm', 'left', 'length', 2, 1, 0),
('dL', 'daL', 'left', 'capacity', 2, 1, 0),
('cL', 'L', 'left', 'capacity', 2, 1, 0),
('mL', 'dL', 'left', 'capacity', 2, 1, 0),
('dag', 'kg', 'left', 'mass', 2, 1, 0),
('g', 'hg', 'left', 'mass', 2, 1, 0),
('dg', 'dag', 'left', 'mass', 2, 1, 0),
('cg', 'g', 'left', 'mass', 2, 1, 0),
('mg', 'dg', 'left', 'mass', 2, 1, 0),
('hm', 'dm', 'right', 'length', 3, 1, 0), # three columns
('dam', 'cm', 'right', 'length', 3, 1, 0),
('dm', 'hm', 'left', 'length', 3, 1, 0),
('cm', 'dam', 'left', 'length', 3, 1, 0),
('km', 'hm', 'right', 'area', 2, 2, 0), # area: 1 column [2]
('hm', 'dam', 'right', 'area', 2, 2, 0),
('dam', 'm', 'right', 'area', 2, 2, 0),
('m', 'dm', 'right', 'area', 2, 2, 0),
('dm', 'cm', 'right', 'area', 2, 2, 0),
('cm', 'mm', 'right', 'area', 2, 2, 0),
('hm', 'km', 'left', 'area', 2, 2, 0),
('dam', 'hm', 'left', 'area', 2, 2, 0),
('m', 'dam', 'left', 'area', 2, 2, 0),
('dm', 'm', 'left', 'area', 2, 2, 0),
('cm', 'dm', 'left', 'area', 2, 2, 0),
('mm', 'cm', 'left', 'area', 2, 2, 0),
('km', 'dam', 'right', 'area', 4, 2, 0), # area: 2 columns [4]
('hm', 'm', 'right', 'area', 4, 2, 0),
('dam', 'dm', 'right', 'area', 4, 2, 0),
('m', 'cm', 'right', 'area', 4, 2, 0),
('dm', 'mm', 'right', 'area', 4, 2, 0),
('dam', 'km', 'left', 'area', 4, 2, 0),
('m', 'hm', 'left', 'area', 4, 2, 0),
('dm', 'dam', 'left', 'area', 4, 2, 0),
('cm', 'm', 'left', 'area', 4, 2, 0),
('mm', 'dm', 'left', 'area', 4, 2, 0),
('km', 'hm', 'right', 'volume', 3, 3, 0), # vol: 1 column [3]
('hm', 'dam', 'right', 'volume', 3, 3, 0),
('dam', 'm', 'right', 'volume', 3, 3, 0),
('m', 'dm', 'right', 'volume', 3, 3, 0),
('dm', 'cm', 'right', 'volume', 3, 3, 0),
('cm', 'mm', 'right', 'volume', 3, 3, 0),
('hm', 'km', 'left', 'volume', 3, 3, 0),
('dam', 'hm', 'left', 'volume', 3, 3, 0),
('m', 'dam', 'left', 'volume', 3, 3, 0),
('dm', 'm', 'left', 'volume', 3, 3, 0),
('cm', 'dm', 'left', 'volume', 3, 3, 0),
('mm', 'cm', 'left', 'volume', 3, 3, 0),
('km', 'dam', 'right', 'volume', 6, 3, 0), # vol: 2 columns [6]
('hm', 'm', 'right', 'volume', 6, 3, 0),
('dam', 'dm', 'right', 'volume', 6, 3, 0),
('m', 'cm', 'right', 'volume', 6, 3, 0),
('dm', 'mm', 'right', 'volume', 6, 3, 0),
('dam', 'km', 'left', 'volume', 6, 3, 0),
('m', 'hm', 'left', 'volume', 6, 3, 0),
('dm', 'dam', 'left', 'volume', 6, 3, 0),
('cm', 'm', 'left', 'volume', 6, 3, 0),
('mm', 'dm', 'left', 'volume', 6, 3, 0),
# vol -> capacity
('dm', 'L', 'none', 'volume2capacity', 3, 3, 0),
('cm', 'mL', 'none', 'volume2capacity', 3, 3, 0),
('m', 'L', 'right', 'volume2capacity', 4, 3, 0),
('dm', 'mL', 'right', 'volume2capacity', 4, 3, 0),
('m', 'hL', 'right', 'volume2capacity', 7, 3, 0),
('m', 'daL', 'right', 'volume2capacity', 7, 3, 0),
('m', 'dL', 'right', 'volume2capacity', 7, 3, 0),
('m', 'cL', 'right', 'volume2capacity', 7, 3, 0),
('m', 'mL', 'right', 'volume2capacity', 7, 3, 0),
('dm', 'hL', 'left', 'volume2capacity', 7, 3, 0),
('dm', 'daL', 'left', 'volume2capacity', 7, 3, 0),
('dm', 'dL', 'right', 'volume2capacity', 7, 3, 0),
('dm', 'cL', 'right', 'volume2capacity', 7, 3, 0),
('cm', 'hL', 'left', 'volume2capacity', 7, 3, 0),
('cm', 'daL', 'left', 'volume2capacity', 7, 3, 0),
('cm', 'L', 'left', 'volume2capacity', 4, 3, 0),
('cm', 'dL', 'left', 'volume2capacity', 7, 3, 0),
('cm', 'cL', 'left', 'volume2capacity', 7, 3, 0),
('mm', 'hL', 'left', 'volume2capacity', 8, 3, 0),
('mm', 'daL', 'left', 'volume2capacity', 8, 3, 0),
('mm', 'L', 'left', 'volume2capacity', 8, 3, 0),
('mm', 'dL', 'left', 'volume2capacity', 8, 3, 0),
('mm', 'cL', 'left', 'volume2capacity', 8, 3, 0),
('mm', 'mL', 'left', 'volume2capacity', 7, 3, 0),
# capacity -> vol
('L', 'dm', 'none', 'capacity2volume', 3, 3, 0),
('mL', 'cm', 'none', 'capacity2volume', 3, 3, 0),
('L', 'm', 'left', 'capacity2volume', 4, 3, 0),
('mL', 'dm', 'left', 'capacity2volume', 4, 3, 0),
('hL', 'm', 'left', 'capacity2volume', 7, 3, 0),
('daL', 'm', 'left', 'capacity2volume', 7, 3, 0),
('dL', 'm', 'left', 'capacity2volume', 7, 3, 0),
('cL', 'm', 'left', 'capacity2volume', 7, 3, 0),
('mL', 'm', 'left', 'capacity2volume', 7, 3, 0),
('hL', 'dm', 'right', 'capacity2volume', 7, 3, 0),
('daL', 'dm', 'right', 'capacity2volume', 7, 3, 0),
('dL', 'dm', 'left', 'capacity2volume', 7, 3, 0),
('cL', 'dm', 'left', 'capacity2volume', 7, 3, 0),
('hL', 'cm', 'right', 'capacity2volume', 7, 3, 0),
('daL', 'cm', 'right', 'capacity2volume', 7, 3, 0),
('L', 'cm', 'right', 'capacity2volume', 4, 3, 0),
('dL', 'cm', 'right', 'capacity2volume', 7, 3, 0),
('cL', 'cm', 'right', 'capacity2volume', 7, 3, 0),
('hL', 'mm', 'right', 'capacity2volume', 8, 3, 0),
('daL', 'mm', 'right', 'capacity2volume', 8, 3, 0),
('L', 'mm', 'right', 'capacity2volume', 8, 3, 0),
('dL', 'mm', 'right', 'capacity2volume', 8, 3, 0),
('cL', 'mm', 'right', 'capacity2volume', 8, 3, 0),
('mL', 'mm', 'right', 'capacity2volume', 7, 3, 0),
]
db.executemany("INSERT "
"INTO units_conversions"
"(unit1, unit2, direction, category, level, dimension, "
"drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?)",
db_rows)
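    # For instance (a sketch, assuming the sqlite3 cursor API used above),
    # a one-column length conversion towards a smaller unit could be drawn
    # with:
    # db.execute("SELECT unit1, unit2 FROM units_conversions "
    #            "WHERE category = 'length' AND level = 1 "
    #            "AND direction = 'right' AND drawDate = 0")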
sys.stderr.write('Insert digits places...\n')
db_rows = [(str(elt), 0) for elt in DIGITSPLACES]
db.executemany("INSERT "
"INTO digits_places"
"(place, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('Insert fractional digits places...\n')
db_rows = [(str(elt), 0) for elt in DIGITSPLACES_DECIMAL]
db.executemany("INSERT "
"INTO fracdigits_places"
"(place, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('Insert dvipsnames selection...\n')
db_rows = [('Apricot', 0), ('BurntOrange', 0), ('Dandelion', 0),
('Goldenrod', 0), ('Lavender', 0), ('LimeGreen', 0),
('NavyBlue', 0), ('Red', 0), ('SkyBlue', 0), ('Periwinkle', 0)]
db.executemany("INSERT "
"INTO dvipsnames_selection(color_name, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('Insert line segments\' marks...\n')
creation_query = '''CREATE TABLE ls_marks
(id INTEGER PRIMARY KEY, mark TEXT,
drawDate INTEGER)'''
db_creation_queries.append(creation_query)
db.execute(creation_query)
db_rows = [('|', 0), ('||', 0), ('|||', 0), ('O', 0), (r'\triangle', 0),
(r'\square', 0), (r'\lozenge', 0), (r'\bigstar', 0)]
db.executemany("INSERT "
"INTO ls_marks(mark, drawDate) "
"VALUES(?, ?)",
db_rows)
anglessets_db_creation_queries = []
sys.stderr.write('Anglessets db: insert anglessets...\n')
creation_query = '''CREATE TABLE anglessets
(id INTEGER PRIMARY KEY,
nbof_angles INTEGER, distcode TEXT, variant INTEGER,
nbof_right_angles INTEGER, equal_angles TEXT,
table2 INTEGER, table3 INTEGER,
table4 INTEGER, table5 INTEGER, table6 INTEGER,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(2, '1_1', 0, 0, 'all_different', 0, 0, 0, 0, 0, 0),
(2, '1_1r', 0, 1, 'all_different', 0, 0, 0, 0, 0, 0),
(2, '1_1r', 1, 1, 'all_different', 0, 0, 0, 0, 0, 0),
(2, '2', 0, 0, 'equilateral', 1, 0, 0, 0, 0, 0),
(3, '1_1_1', 0, 0, 'all_different', 0, 0, 0, 0, 0, 0),
(3, '1_1_1r', 0, 1, 'all_different', 0, 0, 0, 0, 0, 0),
(3, '1_1_1r', 1, 1, 'all_different', 0, 0, 0, 0, 0, 0),
(3, '1_1_1r', 2, 1, 'all_different', 0, 0, 0, 0, 0, 0),
(3, '2_1', 0, 0, 'none', 1, 0, 0, 0, 0, 0),
(3, '2_1', 1, 0, 'none', 1, 0, 0, 0, 0, 0),
(3, '2_1', 2, 0, 'none', 1, 0, 0, 0, 0, 0),
(3, '2_1r', 0, 1, 'none', 1, 0, 0, 0, 0, 0),
(3, '2_1r', 1, 1, 'none', 1, 0, 0, 0, 0, 0),
(3, '2_1r', 2, 1, 'none', 1, 0, 0, 0, 0, 0),
(3, '3', 0, 0, 'equilateral', 0, 1, 0, 0, 0, 0)]
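    # In a distcode like '2_1r', each figure is the size of a group of equal
    # angles; a trailing 'r' flags a right angle (matching
    # nbof_right_angles above).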
anglessets_db.executemany(
"INSERT INTO anglessets("
"nbof_angles, distcode, variant, nbof_right_angles, equal_angles, "
"table2, table3, table4, table5, table6, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows)
sys.stderr.write('Anglessets db: insert anglessets subvariants...\n')
creation_query = '''CREATE TABLE _1_1_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0)]
anglessets_db.executemany(
"INSERT INTO _1_1_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE _1_1r_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0)]
anglessets_db.executemany(
"INSERT INTO _1_1r_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE _2_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0)]
anglessets_db.executemany(
"INSERT INTO _2_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE _1_1_1_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0)]
anglessets_db.executemany(
"INSERT INTO _1_1_1_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE _1_1_1r_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0)]
anglessets_db.executemany(
"INSERT INTO _1_1_1r_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE _2_1_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0)]
anglessets_db.executemany(
"INSERT INTO _2_1_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE _2_1r_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0)]
anglessets_db.executemany(
"INSERT INTO _2_1r_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE _3_subvariants
(id INTEGER PRIMARY KEY, subvariant_nb,
drawDate INTEGER)'''
anglessets_db_creation_queries.append(creation_query)
anglessets_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0)]
anglessets_db.executemany(
"INSERT INTO _3_subvariants(subvariant_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
shapes_db_creation_queries = []
sys.stderr.write('Shapes db: insert polygons...\n')
creation_query = '''CREATE TABLE polygons
(id INTEGER PRIMARY KEY,
sides_nb INTEGER, type TEXT, special TEXT,
codename TEXT, sides_particularity TEXT,
level INTEGER, variant INTEGER,
table2 INTEGER, table3 INTEGER, table4 INTEGER,
table5 INTEGER, table6 INTEGER,
drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(3, 'triangle', 'scalene_triangle', 'triangle_1_1_1',
'all_different', 2, 0, 0, 0, 0, 0, 0, 0),
(3, 'triangle', 'right_triangle', 'triangle_1_1_1',
'all_different', 2, 1, 0, 0, 0, 0, 0, 0),
(3, 'triangle', 'isosceles_triangle', 'triangle_2_1', 'none',
2, 0, 1, 0, 0, 0, 0, 0),
(3, 'triangle', 'equilateral_triangle', 'triangle_3',
'equilateral', 1, 0, 0, 1, 0, 0, 0, 0),
(4, 'quadrilateral', '', 'quadrilateral_1_1_1_1',
'all_different', 3, 0, 0, 0, 0, 0, 0, 0),
(4, 'quadrilateral', '', 'quadrilateral_2_1_1', 'none',
3, 0, 1, 0, 0, 0, 0, 0),
(4, 'quadrilateral', '', 'quadrilateral_2_1_1', 'none',
3, 1, 1, 0, 0, 0, 0, 0),
(4, 'quadrilateral', 'kite', 'quadrilateral_2_2', 'none',
3, 0, 1, 0, 0, 0, 0, 0),
               (4, 'quadrilateral', 'parallelogram', 'quadrilateral_2_2',
                'none', 3, 1, 1, 0, 0, 0, 0, 0),
(4, 'quadrilateral', 'rectangle', 'quadrilateral_2_2', 'none',
2, 2, 1, 0, 0, 0, 0, 0),
(4, 'quadrilateral', '', 'quadrilateral_3_1', 'none',
2, 0, 0, 1, 0, 0, 0, 0),
(4, 'quadrilateral', 'rhombus', 'quadrilateral_4',
'equilateral', 1, 0, 0, 0, 1, 0, 0, 0),
(4, 'quadrilateral', 'square', 'quadrilateral_4', 'equilateral',
1, 1, 0, 0, 1, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_1_1_1_1_1', 'all_different',
4, 0, 0, 0, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_2_1_1_1', 'none',
4, 0, 1, 0, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_2_1_1_1', 'none',
4, 1, 1, 0, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_2_2_1', 'none',
4, 0, 1, 0, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_2_2_1', 'none',
4, 1, 1, 0, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_2_2_1', 'none',
4, 2, 1, 0, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_3_1_1', 'none',
3, 0, 0, 1, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_3_1_1', 'none',
3, 1, 0, 1, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_3_2', 'none',
3, 0, 1, 1, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_3_2', 'none',
3, 1, 1, 1, 0, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_4_1', 'none',
2, 0, 0, 0, 1, 0, 0, 0),
(5, 'pentagon', '', 'pentagon_5', 'equilateral',
1, 0, 0, 0, 0, 1, 0, 0),
(6, 'hexagon', '', 'hexagon_1_1_1_1_1_1', 'all_different',
5, 0, 0, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_1_1_1_1', 'none',
5, 0, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_1_1_1_1', 'none',
5, 1, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_1_1_1_1', 'none',
5, 2, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 0, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 1, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 2, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 3, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 4, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 5, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 6, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_1_1', 'none',
5, 7, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_2', 'none',
3, 0, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_2', 'none',
3, 1, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_2', 'none',
3, 2, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_2_2_2', 'none',
3, 3, 1, 0, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_1_1_1', 'none',
4, 0, 0, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_1_1_1', 'none',
4, 1, 0, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_1_1_1', 'none',
4, 2, 0, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_2_1', 'none',
4, 0, 1, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_2_1', 'none',
4, 1, 1, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_2_1', 'none',
4, 2, 1, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_3', 'none',
3, 0, 0, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_3', 'none',
3, 1, 0, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_3_3', 'none',
3, 2, 0, 1, 0, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_4_1_1', 'none',
3, 0, 0, 0, 1, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_4_1_1', 'none',
3, 1, 0, 0, 1, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_4_1_1', 'none',
3, 2, 0, 0, 1, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_4_2', 'none',
3, 0, 1, 0, 1, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_4_2', 'none',
3, 1, 1, 0, 1, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_4_2', 'none',
3, 2, 1, 0, 1, 0, 0, 0),
(6, 'hexagon', '', 'hexagon_5_1', 'none',
2, 0, 0, 0, 0, 1, 0, 0),
(6, 'hexagon', '', 'hexagon_6', 'equilateral',
1, 0, 0, 0, 0, 0, 1, 0)]
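    # The codename suffix lists the groups of equal sides (e.g.
    # 'quadrilateral_2_2' has two pairs of equal sides) and the tableN
    # columns flag the presence of a group of N equal sides.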
shapes_db.executemany(
"INSERT INTO polygons("
"sides_nb, type, special, codename, sides_particularity, level, "
"variant, table2, table3, table4, table5, table6, drawDate) "
"VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
db_rows)
sys.stderr.write('Shapes db: insert shapes variants: scalene triangles...')
creation_query = '''CREATE TABLE scalene_triangle_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
"INSERT INTO scalene_triangle_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
'right triangles...')
creation_query = '''CREATE TABLE right_triangle_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)]
shapes_db.executemany(
"INSERT INTO right_triangle_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
'right triangles, isosceles triangles...')
creation_query = '''CREATE TABLE triangle_2_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)]
shapes_db.executemany(
"INSERT INTO triangle_2_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
'right triangles, isosceles triangles, equilateral '
'triangles...')
creation_query = '''CREATE TABLE triangle_3_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
"INSERT INTO triangle_3_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
'right triangles, isosceles triangles, equilateral '
'triangles, quadrilaterals...')
creation_query = '''CREATE TABLE quadrilateral_1_1_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0)]
shapes_db.executemany(
"INSERT INTO quadrilateral_1_1_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE quadrilateral_2_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
"INSERT INTO quadrilateral_2_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE quadrilateral_2_2_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
"INSERT INTO quadrilateral_2_2_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE quadrilateral_3_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
"INSERT INTO quadrilateral_3_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE quadrilateral_4_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0)]
shapes_db.executemany(
"INSERT INTO quadrilateral_4_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
'right triangles, isosceles triangles, equilateral '
'triangles, quadrilaterals, pentagons...\n')
creation_query = '''CREATE TABLE pentagon_1_1_1_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO pentagon_1_1_1_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE pentagon_2_1_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO pentagon_2_1_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE pentagon_2_2_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO pentagon_2_2_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE pentagon_3_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO pentagon_3_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE pentagon_3_2_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO pentagon_3_2_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE pentagon_4_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO pentagon_4_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE pentagon_5_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO pentagon_5_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_1_1_1_1_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO hexagon_1_1_1_1_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_2_1_1_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
"INSERT INTO hexagon_2_1_1_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_2_2_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_2_2_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_2_2_2_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_2_2_2_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_3_1_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_3_1_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_3_2_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_3_2_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_3_3_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_3_3_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_4_1_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_4_1_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_4_2_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_4_2_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_5_1_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_5_1_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
creation_query = '''CREATE TABLE hexagon_6_shapes
(id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), ]
shapes_db.executemany(
"INSERT INTO hexagon_6_shapes(shape_nb, drawDate) "
"VALUES(?, ?)",
db_rows)
solids_db_creation_queries = []
sys.stderr.write('Solids db: insert solids...\n')
# type will be: cuboid, cube, prism etc.
creation_query = '''CREATE TABLE polyhedra
(id INTEGER PRIMARY KEY,
faces_nb INTEGER, type TEXT, variant INTEGER,
drawDate INTEGER)'''
solids_db_creation_queries.append(creation_query)
solids_db.execute(creation_query)
db_rows = [(6, 'rightcuboid', 0, 0),
(6, 'rightcuboid', 1, 0),
(6, 'rightcuboid', 2, 0),
(6, 'rightcuboid', 3, 0),
(6, 'rightcuboid', 4, 0),
(6, 'rightcuboid', 5, 0),
]
solids_db.executemany(
"INSERT INTO polyhedra("
"faces_nb, type, variant, drawDate) "
"VALUES(?, ?, ?, ?)",
db_rows)
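    # Only the right cuboid is implemented so far, in six variants.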
sys.stderr.write('Commit changes to databases...\n')
db.commit()
shapes_db.commit()
solids_db.commit()
anglessets_db.commit()
natural_nb_tuples_db.commit()
sys.stderr.write('Close databases...\n')
db.close()
shapes_db.close()
solids_db.close()
anglessets_db.close()
natural_nb_tuples_db.close()
sys.stderr.write('Create databases\' indices...\n')
    for queries, path in (
            (db_creation_queries, settings.db_index_path),
            (shapes_db_creation_queries, settings.shapes_db_index_path),
            (solids_db_creation_queries, settings.solids_db_index_path),
            (anglessets_db_creation_queries,
             settings.anglessets_db_index_path),
            (natural_nb_tuples_db_creation_queries,
             settings.natural_nb_tuples_db_index_path)):
        db_index = {}
        for qr in queries:
            key, value = parse_sql_creation_query(qr)
            db_index.update({key: value})
        with open(path, 'w') as f:
            json.dump(db_index, f, indent=4)
            f.write('\n')
sys.stderr.write('Done!\n')
if __name__ == '__main__':
__main__()
| gpl-3.0 | 5,954,798,789,077,043,000 | 47.293737 | 79 | 0.482178 | false |
aberon10/training | training/ticketing_system/views.py | 1 | 10444 | # -*- coding: utf-8 -*-
import time
from django.db.models import Q
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import HttpResponseNotFound
from django.contrib.auth.hashers import make_password
from django.contrib.auth.hashers import check_password
from django.views.generic.edit import FormView
from django.views.generic import TemplateView
from django.views.generic import RedirectView
from .forms import SignInForm
from .forms import LoginForm
from .forms import TicketCreateForm
from .models import User
from .models import Ticket
class LoginView(FormView):
""" Login View. """
form_class = LoginForm
template_name = 'ticketing_system/login.html'
success_url = '/dashboard'
def get(self, request, *args, **kwargs):
if request.session.get('user'):
return HttpResponseRedirect(self.get_success_url())
else:
return render(
request,
template_name=self.template_name,
context={'form': self.form_class}
)
def form_valid(self, form):
context = {
'form': form,
'error_login': 'The user and/or password do not match'
}
email = form.cleaned_data['email']
password = form.cleaned_data['password']
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
pass
else:
if check_password(password, user.password):
# create the new user session
self.request.session['user'] = user.email
self.request.session['name'] = user.name
return HttpResponseRedirect(self.get_success_url())
return render(
self.request,
template_name=self.template_name,
context=context
)
class LogoutView(RedirectView):
""" Logout View. """
url = '/login'
def get(self, request, *args, **kwargs):
try:
# delete the user session
del request.session['user']
del request.session['name']
except KeyError:
pass
return super(LogoutView, self).get(request, *args, **kwargs)
class RegisterView(TemplateView):
""" Register View. """
template_name = 'ticketing_system/register.html'
def get(self, request, *args, **kwargs):
form = SignInForm()
return render(
request,
template_name=self.template_name,
context={'register_form': form}
)
def post(self, request, *args, **kwargs):
form = SignInForm(request.POST)
response = {
'register_form': form,
'message': '',
'success': False
}
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
confirm_password = form.cleaned_data['confirm_password']
name = form.cleaned_data['name']
if password != confirm_password:
                # assigning to a BoundField has no effect; attach the error
                # to the form so the template can render it
                form.add_error('confirm_password', 'Passwords do not match.')
else:
try:
User.objects.get(email=email)
except User.DoesNotExist:
user = User(
email=email,
name=name,
password=make_password(password)
)
user.save()
response['register_form'] = SignInForm()
response['success'] = True
                    response['message'] = ('You have successfully '
                                           'registered!')
else:
                    form.add_error('email', 'User already exists')
return render(
request,
template_name=self.template_name,
context=response,
)
class DashboardView(TemplateView):
""" Dashboard View. """
template_name = 'ticketing_system/dashboard.html'
def get(self, request, *args, **kwargs):
if request.session.get('user'):
user = User.objects.get(email=request.session['user'])
tickets = Ticket.objects.filter(
Q(status='O'),
Q(author=user) | Q(assignee=user)
).distinct()
return render(
request,
template_name=self.template_name,
context={
'current_path': request.path.split('/')[1],
'tickets': tickets
}
)
else:
return HttpResponseRedirect('/login')
def post(self, request, *args, **kwargs):
        if request.session.get('user'):
title = request.POST.get('title')
status = request.POST.get('status')
user = User.objects.get(email=request.session['user'])
tickets = Ticket.objects.all()
if title != '':
tickets = tickets.filter(title__icontains=title)
tickets = tickets.filter(
Q(status=status),
Q(author=user) | Q(assignee=user)
).distinct()
return render(
request,
template_name=self.template_name,
context={
'current_path': request.path.split('/')[1],
'tickets': tickets
}
)
else:
return HttpResponseRedirect('/login')
class TicketView(FormView):
""" Ticket View. """
form_class = TicketCreateForm
template_name = 'ticketing_system/ticket_form.html'
success_url = '/ticket'
def get(self, request, *args, **kwargs):
if not request.session.get('user'):
return HttpResponseRedirect('/login')
else:
user = User.objects.get(email=request.session['user'])
try:
if kwargs['id_ticket']:
try:
ticket = Ticket.objects.filter(
Q(pk=int(kwargs['id_ticket'])),
Q(author=user) | Q(assignee=user)
)[0]
                    # an empty queryset raises IndexError on [0], not
                    # Ticket.DoesNotExist
                    except IndexError:
return HttpResponseNotFound('<h1>Page not found</h1>')
else:
form = self.form_class(initial={
'title': ticket.title,
'body': ticket.body,
'author': ticket.author,
'created': ticket.created,
'status': ticket.status,
'assignee': ticket.assignee.all()
})
except KeyError:
form = self.form_class(initial={
'author': request.session['user'],
'created': time.strftime('%Y-%m-%d'),
'status': 'O',
'assignee': user.id
})
return render(
request,
template_name=self.template_name,
context={'form': form}
)
def post(self, request, *args, **kwargs):
if not request.session.get('user'):
return HttpResponseRedirect('/login')
else:
error_message = ''
ticket = Ticket()
assignees_users = request.POST.getlist('assignee')
form = TicketCreateForm({
'title': request.POST.get('title'),
'body': request.POST.get('body'),
'status': request.POST.get('status'),
'created': request.POST.get('created')
})
if form.is_valid():
title = form.cleaned_data['title']
body = form.cleaned_data['body']
email = self.request.session['user']
created = form.cleaned_data['created']
status = form.cleaned_data['status']
author = User.objects.get(email=email)
try:
if kwargs['id_ticket']:
ticket = Ticket.objects.get(
pk=int(kwargs['id_ticket'])
)
for item in ticket.assignee.all():
user = User.objects.get(pk=int(item.id))
ticket.assignee.remove(user)
except KeyError:
pass
try:
users = []
for user in assignees_users:
users.append(User.objects.get(pk=int(user)))
except User.DoesNotExist:
error_message = 'Error creating ticket'
else:
ticket.title = title
ticket.body = body
ticket.author = author
ticket.created = created
ticket.status = status
ticket.save()
if not users:
users.append(author)
ticket.assignee.set(users)
return HttpResponseRedirect('/dashboard')
return render(
request,
template_name=self.template_name,
context={
'form': TicketCreateForm(request.POST),
'error_message': error_message
}
)
class TicketDeleteView(TemplateView):
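    """ Ticket Delete View. """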
def get(self, request, *args, **kwargs):
if not request.session.get('user'):
return HttpResponseRedirect('/login')
else:
try:
if kwargs['id_ticket']:
user = User.objects.get(email=request.session['user'])
ticket = Ticket.objects.filter(
Q(pk=int(kwargs['id_ticket'])),
Q(author=user) | Q(assignee=user)
).distinct()
ticket.delete()
except KeyError:
pass
except Ticket.DoesNotExist:
pass
return HttpResponseRedirect('/dashboard')
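# A possible URL wiring for these views (a sketch; the route names below are
# assumptions, not taken from this project's urls.py):
# from django.conf.urls import url
# from .views import (LoginView, LogoutView, RegisterView, DashboardView,
#                     TicketView, TicketDeleteView)
# urlpatterns = [
#     url(r'^login$', LoginView.as_view()),
#     url(r'^logout$', LogoutView.as_view()),
#     url(r'^register$', RegisterView.as_view()),
#     url(r'^dashboard$', DashboardView.as_view()),
#     url(r'^ticket$', TicketView.as_view()),
#     url(r'^ticket/(?P<id_ticket>[0-9]+)$', TicketView.as_view()),
#     url(r'^ticket/delete/(?P<id_ticket>[0-9]+)$',
#         TicketDeleteView.as_view()),
# ]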
| mit | -1,894,123,065,612,673,500 | 32.474359 | 79 | 0.486021 | false |
cadithealth/iniherit | iniherit/parser.py | 1 | 9823 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <[email protected]>
# date: 2013/08/20
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import io
import os.path
import warnings
import six
from six.moves import configparser as CP
from six.moves import urllib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
# TODO: PY3 added a `ConfigParser.read_dict` that should probably
# be overridden as well...
# TODO: should `ConfigParser.set()` be checked for option==INHERITTAG?...
from . import interpolation
__all__ = (
'Loader', 'IniheritMixin', 'RawConfigParser',
'ConfigParser', 'SafeConfigParser',
'DEFAULT_INHERITTAG',
)
#------------------------------------------------------------------------------
_real_RawConfigParser = CP.RawConfigParser
_real_ConfigParser = CP.ConfigParser
_real_SafeConfigParser = CP.SafeConfigParser
DEFAULT_INHERITTAG = '%inherit'
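# On the INI side, the inheritance syntax handled below looks like:
#
#   [DEFAULT]
#   %inherit = base.ini ?optional.ini
#
#   [section]
#   %inherit = other.ini[other-section]
#
# where a '?' prefix makes the inherited file optional, 'file[section]'
# pulls in a specific section of the other file, and names are URL-quoted
# (decoded with urllib.parse.unquote).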
#------------------------------------------------------------------------------
class Loader(object):
def load(self, name, encoding=None):
# todo: these fp are leaked... need to use "contextlib.closing" somehow...
if encoding is None:
return open(name)
return open(name, encoding=encoding)
#------------------------------------------------------------------------------
def _get_real_interpolate(parser):
# todo: should this be sensitive to `parser`?...
return \
getattr(_real_ConfigParser, '_iniherit__interpolate', None) \
or getattr(_real_ConfigParser, '_interpolate', None)
#------------------------------------------------------------------------------
# TODO: this would probably be *much* simpler with meta-classes...
#------------------------------------------------------------------------------
class IniheritMixin(object):
IM_INHERITTAG = DEFAULT_INHERITTAG
IM_DEFAULTSECT = CP.DEFAULTSECT
#----------------------------------------------------------------------------
def __init__(self, *args, **kw):
self.loader = kw.get('loader', None) or Loader()
self.inherit = True
self.IM_INHERITTAG = DEFAULT_INHERITTAG
self.IM_DEFAULTSECT = getattr(self, 'default_section', CP.DEFAULTSECT)
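    # (IM_DEFAULTSECT falls back to the py2 constant when the py3-only
    # `default_section` attribute is absent.)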
#----------------------------------------------------------------------------
def read(self, filenames, encoding=None):
if isinstance(filenames, six.string_types):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = self._load(filename, encoding=encoding)
except IOError:
continue
self._read(fp, filename, encoding=encoding)
fp.close()
read_ok.append(filename)
return read_ok
#----------------------------------------------------------------------------
def _load(self, filename, encoding=None):
if not getattr(self, 'loader', None):
self.loader = Loader()
return self.loader.load(filename, encoding=encoding)
#----------------------------------------------------------------------------
def _read(self, fp, fpname, encoding=None):
if getattr(self, 'inherit', True) or not hasattr(self, '_iniherit__read'):
raw = self._readRecursive(fp, fpname, encoding=encoding)
self._apply(raw, self)
else:
self._iniherit__read(fp, fpname)
#----------------------------------------------------------------------------
def _makeParser(self, raw=True):
ret = _real_RawConfigParser() if raw else _real_ConfigParser()
ret.inherit = False
## TODO: any other configurations that need to be copied into `ret`??...
ret.optionxform = self.optionxform
return ret
#----------------------------------------------------------------------------
def _readRecursive(self, fp, fpname, encoding=None):
ret = self._makeParser()
src = self._makeParser()
src.readfp(fp, fpname)
dirname = os.path.dirname(fpname)
if src.has_option(self.IM_DEFAULTSECT, self.IM_INHERITTAG):
inilist = src.get(self.IM_DEFAULTSECT, self.IM_INHERITTAG)
src.remove_option(self.IM_DEFAULTSECT, self.IM_INHERITTAG)
inilist = self._interpolate_with_vars(
src, self.IM_DEFAULTSECT, self.IM_INHERITTAG, inilist)
for curname in inilist.split():
optional = curname.startswith('?')
if optional:
curname = curname[1:]
curname = os.path.join(dirname, urllib.parse.unquote(curname))
try:
curfp = self._load(curname, encoding=encoding)
except IOError:
if optional:
continue
raise
self._apply(self._readRecursive(curfp, curname, encoding=encoding), ret)
for section in src.sections():
if not src.has_option(section, self.IM_INHERITTAG):
continue
inilist = src.get(section, self.IM_INHERITTAG)
src.remove_option(section, self.IM_INHERITTAG)
inilist = self._interpolate_with_vars(
src, section, self.IM_INHERITTAG, inilist)
for curname in inilist.split():
optional = curname.startswith('?')
if optional:
curname = curname[1:]
fromsect = section
if '[' in curname and curname.endswith(']'):
curname, fromsect = curname.split('[', 1)
fromsect = urllib.parse.unquote(fromsect[:-1])
curname = os.path.join(dirname, urllib.parse.unquote(curname))
try:
curfp = self._load(curname, encoding=encoding)
except IOError:
if optional:
continue
raise
self._apply(self._readRecursive(curfp, curname, encoding=encoding), ret,
sections={fromsect: section})
self._apply(src, ret)
return ret
#----------------------------------------------------------------------------
def _apply(self, src, dst, sections=None):
# todo: this does not detect the case that a section overrides
# the default section with the exact same value... ugh.
if sections is None:
for option, value in src.items(self.IM_DEFAULTSECT):
value = interpolation.interpolate_super(
self, src, dst, self.IM_DEFAULTSECT, option, value)
self._im_setraw(dst, self.IM_DEFAULTSECT, option, value)
if sections is None:
sections = OrderedDict([(s, s) for s in src.sections()])
for srcsect, dstsect in sections.items():
if not dst.has_section(dstsect):
dst.add_section(dstsect)
for option, value in src.items(srcsect):
# todo: this is a *terrible* way of detecting if this option is
# defaulting...
if src.has_option(self.IM_DEFAULTSECT, option) \
and value == src.get(self.IM_DEFAULTSECT, option):
continue
value = interpolation.interpolate_super(
self, src, dst, dstsect, option, value)
self._im_setraw(dst, dstsect, option, value)
#----------------------------------------------------------------------------
def _im_setraw(self, parser, section, option, value):
if six.PY3 and hasattr(parser, '_interpolation'):
# todo: don't do this for systems that have
# http://bugs.python.org/issue21265 fixed
try:
tmp = parser._interpolation.before_set
parser._interpolation.before_set = lambda self,s,o,v,*a,**k: v
_real_RawConfigParser.set(parser, section, option, value)
finally:
parser._interpolation.before_set = tmp
else:
_real_RawConfigParser.set(parser, section, option, value)
#----------------------------------------------------------------------------
def _interpolate_with_vars(self, parser, section, option, rawval):
## TODO: ugh. this just doesn't feel "right"...
try:
vars = dict(parser.items(section, raw=True))
except:
vars = dict(parser.items(section))
if not isinstance(parser, _real_ConfigParser):
parser = self._makeParser(raw=False)
base_interpolate = _get_real_interpolate(parser)
return interpolation.interpolate(
parser, base_interpolate, section, option, rawval, vars)
#----------------------------------------------------------------------------
# todo: yikes! overriding a private method!...
def _interpolate(self, section, option, rawval, vars):
base_interpolate = _get_real_interpolate(self)
return interpolation.interpolate(
self, base_interpolate, section, option, rawval, vars)
if not hasattr(_real_ConfigParser, '_interpolate') and not six.PY3:
warnings.warn(
'ConfigParser did not have a "_interpolate" method'
' -- iniherit may be broken on this platform',
RuntimeWarning)
#------------------------------------------------------------------------------
# todo: i'm a little worried about the diamond inheritance here...
class RawConfigParser(IniheritMixin, _real_RawConfigParser):
_DEFAULT_INTERPOLATION = interpolation.IniheritInterpolation()
def __init__(self, *args, **kw):
loader = kw.pop('loader', None)
IniheritMixin.__init__(self, loader=loader)
_real_RawConfigParser.__init__(self, *args, **kw)
class ConfigParser(RawConfigParser, _real_ConfigParser):
def __init__(self, *args, **kw):
loader = kw.pop('loader', None)
RawConfigParser.__init__(self, loader=loader)
_real_ConfigParser.__init__(self, *args, **kw)
class SafeConfigParser(ConfigParser, _real_SafeConfigParser):
def __init__(self, *args, **kw):
loader = kw.pop('loader', None)
ConfigParser.__init__(self, loader=loader)
_real_SafeConfigParser.__init__(self, *args, **kw)
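# Minimal usage sketch (added for illustration; 'app.ini' is a placeholder
# path whose [DEFAULT] section carries the %inherit tag shown above):
#
#   from iniherit.parser import SafeConfigParser
#   cp = SafeConfigParser()
#   cp.read('app.ini')            # parent files are merged in transparently
#   value = cp.get('server', 'port')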
#------------------------------------------------------------------------------
# end of $Id$
# $ChangeLog$
#------------------------------------------------------------------------------
| mit | 6,685,569,589,398,013,000 | 38.769231 | 80 | 0.551461 | false |
MadsJensen/RP_scripts | extract_ts_epochs_interupt.py | 1 | 1207 | import sys
import numpy as np
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs
from my_settings import (mne_folder, epochs_folder, source_folder)
subject = sys.argv[1]
method = "dSPM"
snr = 1.
lambda2 = 1. / snr**2
labels = mne.read_labels_from_annot(
subject=subject, parc="PALS_B12_Brodmann", regexp="Brodmann")
condition = "interupt"
inv = read_inverse_operator(mne_folder + "%s_%s-inv.fif" % (subject, condition))
epochs = mne.read_epochs(epochs_folder + "%s_%s-epo.fif" % (subject, condition))
# epochs.resample(500)
stcs = apply_inverse_epochs(
epochs["press"], inv, lambda2, method=method, pick_ori=None)
ts = [
mne.extract_label_time_course(
stc, labels, inv["src"], mode="mean_flip") for stc in stcs
]
# for h, tc in enumerate(ts):
# for j, t in enumerate(tc):
# t *= np.sign(t[np.argmax(np.abs(t))])
# tc[j, :] = t
# ts[h] = tc
ts = np.asarray(ts)
stc.save(source_folder + "%s_%s_epo" % (subject, condition))
np.save(source_folder + "ave_ts/%s_%s_ts-epo.npy" % (subject, condition), ts)
| bsd-3-clause | 1,019,196,104,266,555,100 | 29.175 | 78 | 0.589064 | false |
ContinuumIO/chaco | setup.py | 1 | 2424 | # Copyright (c) 2008-2012 by Enthought, Inc.
# All rights reserved.
from os.path import join
from numpy import get_include
from setuptools import setup, Extension, find_packages
info = {}
execfile(join('chaco', '__init__.py'), info)
numpy_include_dir = get_include()
# Register Python extensions
contour = Extension(
'chaco.contour.contour',
sources=['chaco/contour/cntr.c'],
include_dirs=[numpy_include_dir],
define_macros=[('NUMPY', None)]
)
cython_speedups = Extension(
'chaco._cython_speedups',
sources=['chaco/_cython_speedups.c'],
include_dirs=[numpy_include_dir],
)
# Commenting this out for now, until we get the module fully tested and working
#speedups = Extension(
# 'chaco._speedups',
# sources = ['chaco/_speedups.cpp'],
# include_dirs = [get_include()],
# define_macros=[('NUMPY', None)]
# )
setup(
name = 'chaco',
version = info['__version__'],
author = 'Peter Wang, et. al.',
author_email = '[email protected]',
maintainer = 'ETS Developers',
maintainer_email = '[email protected]',
url = 'http://code.enthought.com/projects/chaco',
classifiers = [c.strip() for c in """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: Science/Research
License :: OSI Approved :: BSD License
Operating System :: MacOS
Operating System :: Microsoft :: Windows
Operating System :: OS Independent
Operating System :: POSIX
Operating System :: Unix
Programming Language :: C
Programming Language :: Python
Topic :: Scientific/Engineering
Topic :: Software Development
Topic :: Software Development :: Libraries
""".splitlines() if len(c.strip()) > 0],
package_data={'chaco': ['tools/toolbars/images/*.png',
'layers/data/*.svg']},
description = 'interactive 2-dimensional plotting',
long_description = open('README.rst').read(),
download_url = ('http://www.enthought.com/repo/ets/chaco-%s.tar.gz' %
info['__version__']),
ext_modules = [contour, cython_speedups],
include_package_data = True,
install_requires = info['__requires__'],
license = 'BSD',
packages = find_packages(),
platforms = ["Windows", "Linux", "Mac OS-X", "Unix", "Solaris"],
zip_safe = False,
)
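# Typical source build/install commands (illustrative, not from the original
# file); the contour and cython_speedups extensions above need a C compiler:
#   python setup.py build_ext --inplace
#   python setup.py install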
| bsd-3-clause | -1,258,937,183,096,643,600 | 31.756757 | 79 | 0.622937 | false |
cread/ecks | ecks/plugins/disk.py | 1 | 2091 | """
Ecks plugin to collect disk usage information
Copyright 2011 Chris Read ([email protected])
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pprint
def _calc_size(block_size, block_count):
if block_size and block_count:
return block_size * block_count
else:
return -1
def get_disk(parent, host, community):
""" This is a plugin to be loaded by Ecks
return an array of tuples containing (type, path, size in bytes, used bytes) for each block device
type is an integer which is one of the following:
hrStorageOther = 1
hrStorageRam = 2
hrStorageVirtualMemory = 3
hrStorageFixedDisk = 4
hrStorageRemovableDisk = 5
hrStorageFloppyDisk = 6
hrStorageCompactDisc = 7
hrStorageRamDisk = 8
hrStorageFlashMemory = 9
hrStorageNetworkDisk = 10
"""
disks = (1,3,6,1,2,1,25,2,3,1) # HOST-RESOURCES-MIB
data = parent.get_snmp_data(host, community, disks, 1)
    # We need to work this out the long way, as there are cases where size or used is not supplied
details = []
formatted = []
for i in [t for t in parent._extract(data, int, 1)]:
details.append([ value for (oid, (data_type, index), value) in data if index == i and data_type != 1])
for dev in details:
if len(dev) != 5:
continue
formatted.append((
tuple(dev[0])[-1],
str(dev[1]),
int(dev[2]) * int(dev[3]),
int(dev[2]) * int(dev[4])
))
return formatted
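# Hedged usage sketch (not part of the original plugin): within Ecks this
# plugin is dispatched through the framework's main API, roughly:
#
#   import ecks
#   e = ecks.Ecks()
#   # host and community strings are examples; 'disk' selects this plugin
#   for storage_type, path, size, used in e.get_data('127.0.0.1', 'public', 'disk'):
#       print path, used, "/", size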
| apache-2.0 | 7,506,630,265,331,763,000 | 30.681818 | 109 | 0.642276 | false |
MaxTyutyunnikov/lino | obsolete/src/sandbox/cherrypy/4/hello4.py | 1 | 1278 | #!/usr/bin/python
import cherrypy
from HyperText.Documents import Document
from HyperText import HTML as html
from lino.apps.pinboard import demo
class Root:
def __init__(self,dbsess):
self.dbsess = dbsess
def default(self,*args,**kw):
title=str(self.dbsess)
doc=Document(title=html.TITLE(title))
div = html.DIV(klass="title")
doc.append(div)
div.append(html.H1(title))
div = html.DIV(klass="menu")
doc.append(div)
p = html.P("Menu:")
div.append(p)
p.append(html.BR())
p.append(html.A("home",href="/"))
p.append(html.BR())
p.append(html.A("foo",href="foo/bar/baz"))
p.append(html.BR())
p.append(html.A("reports",href="report"))
doc.append(html.P(self.dbsess.app.aboutString()))
doc.append(html.P('args='+repr(args)))
doc.append(html.P('kw='+repr(kw)))
#
div = html.DIV(klass="footer")
doc.append(div)
div.append(html.P("foo "+cherrypy.request.base + " bar"))
return str(doc)
default.exposed = True
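# Note (added for clarity): with cherrypy.root set to Root(...) below,
# requests like "/", "/foo/bar/baz" and "/report" all fall through to
# Root.default; the path segments arrive in *args and any query-string
# parameters in **kw, which the page echoes back.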
dbsess=demo.startup()
frm = dbsess.db.app.showMainForm()
cherrypy.root = Root(dbsess)
cherrypy.server.start()
| gpl-3.0 | 2,859,601,189,977,585,700 | 22.236364 | 65 | 0.564945 | false |
alpine9000/amiga_examples | tools/external/amitools/amitools/binfmt/hunk/HunkReader.py | 1 | 30698 | """A class for reading Amiga executables and object files in Hunk format"""
import os
import struct
import StringIO
from types import *
from Hunk import *
class HunkReader:
"""Load Amiga executable Hunk structures"""
def __init__(self):
self.hunks = []
self.error_string = None
self.type = None
self.header = None
self.segments = []
self.overlay = None
self.overlay_headers = None
self.overlay_segments = None
self.libs = None
self.units = None
def get_struct_summary(self, obj):
if type(obj) == ListType:
result = []
for a in obj:
v = self.get_struct_summary(a)
if v != None:
result.append(v)
return "[" + ",".join(result) + "]"
elif type(obj) == DictType:
if obj.has_key('type_name'):
type_name = obj['type_name']
return type_name.replace('HUNK_','')
else:
result = []
for k in obj.keys():
v = self.get_struct_summary(obj[k])
if v != None:
result.append(k + ":" + v)
return '{' + ",".join(result) + '}'
else:
return None
def get_long(self, data):
return struct.unpack(">I",data)[0]
def read_long(self, f):
data = f.read(4)
if len(data) == 0:
return -1
elif len(data) != 4:
return -(len(data)+1)
return struct.unpack(">I",data)[0]
def read_word(self, f):
data = f.read(2)
if len(data) == 0:
return -1
elif len(data) != 2:
return -(len(data)+1)
return struct.unpack(">H",data)[0]
def read_name(self, f):
num_longs = self.read_long(f)
if num_longs < 0:
return -1,None
elif num_longs == 0:
return 0,""
else:
return self.read_name_size(f, num_longs)
def read_tag(self, f):
data = f.read(4)
if len(data) == 0:
      return -1
elif len(data) != 4:
return -(len(data)+1)
return data
def read_name_size(self, f, num_longs):
size = (num_longs & 0xffffff) * 4
data = f.read(size)
if len(data) < size:
return -1,None
endpos = data.find('\0')
if endpos == -1:
return size,data
elif endpos == 0:
return 0,""
else:
return size,data[:endpos]
def get_index_name(self, strtab, offset):
end = strtab.find('\0',offset)
if end == -1:
return strtab[offset:]
else:
return strtab[offset:end]
def is_valid_first_hunk_type(self, hunk_type):
return hunk_type == HUNK_HEADER or hunk_type == HUNK_LIB or hunk_type == HUNK_UNIT
def parse_header(self, f, hunk):
names = []
hunk['names'] = names
while True:
l,s = self.read_name(f)
if l < 0:
self.error_string = "Error parsing HUNK_HEADER names"
return RESULT_INVALID_HUNK_FILE
elif l == 0:
break
names.append(s)
# table size and hunk range
table_size = self.read_long(f)
first_hunk = self.read_long(f)
last_hunk = self.read_long(f)
if table_size < 0 or first_hunk < 0 or last_hunk < 0:
self.error_string = "HUNK_HEADER invalid table_size or first_hunk or last_hunk"
return RESULT_INVALID_HUNK_FILE
hunk['table_size'] = table_size
hunk['first_hunk'] = first_hunk
hunk['last_hunk'] = last_hunk
# determine number of hunks in size table
num_hunks = last_hunk - first_hunk + 1
hunk_table = []
for a in xrange(num_hunks):
hunk_info = {}
hunk_size = self.read_long(f)
if hunk_size < 0:
self.error_string = "HUNK_HEADER contains invalid hunk_size"
return RESULT_INVALID_HUNK_FILE
hunk_bytes = hunk_size & ~HUNKF_ALL
hunk_bytes *= 4 # longs to bytes
hunk_info['size'] = hunk_bytes
self.set_mem_flags(hunk_info, hunk_size & HUNKF_ALL, 30)
hunk_table.append(hunk_info)
hunk['hunks'] = hunk_table
return RESULT_OK
def parse_code_or_data(self, f, hunk):
num_longs = self.read_long(f)
if num_longs < 0:
self.error_string = "%s has invalid size" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
# read in hunk data
size = num_longs * 4
hunk['size'] = size & ~HUNKF_ALL
flags = size & HUNKF_ALL
self.set_mem_flags(hunk, flags, 30)
hunk['data_file_offset'] = f.tell()
data = f.read(hunk['size'])
hunk['data'] = data
return RESULT_OK
def parse_bss(self, f, hunk):
num_longs = self.read_long(f)
if num_longs < 0:
self.error_string = "%s has invalid size" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
# read in hunk data
size = num_longs * 4
hunk['size'] = size & ~HUNKF_ALL
flags = size & HUNKF_ALL
self.set_mem_flags(hunk, flags, 30)
return RESULT_OK
def parse_reloc(self, f, hunk):
num_relocs = 1
reloc = {}
hunk['reloc'] = reloc
while num_relocs != 0:
num_relocs = self.read_long(f)
if num_relocs < 0:
self.error_string = "%s has invalid number of relocations" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
elif num_relocs == 0:
# last relocation found
break
# build reloc map
hunk_num = self.read_long(f)
if hunk_num < 0:
self.error_string = "%s has invalid hunk num" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
offsets = []
for a in xrange(num_relocs & 0xffff):
offset = self.read_long(f)
if offset < 0:
self.error_string = "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" \
% (hunk['type_name'],a,offset,num_relocs,hunk_num,f.tell())
return RESULT_INVALID_HUNK_FILE
offsets.append(offset)
reloc[hunk_num] = offsets
return RESULT_OK
def parse_reloc_short(self, f, hunk):
num_relocs = 1
reloc = {}
hunk['reloc'] = reloc
total_words = 0
while num_relocs != 0:
num_relocs = self.read_word(f)
if num_relocs < 0:
self.error_string = "%s has invalid number of relocations" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
elif num_relocs == 0:
# last relocation found
total_words += 1
break
# build reloc map
hunk_num = self.read_word(f)
if hunk_num < 0:
self.error_string = "%s has invalid hunk num" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
offsets = []
count = num_relocs & 0xffff
total_words += count + 2
for a in xrange(count):
offset = self.read_word(f)
if offset < 0:
self.error_string = "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" \
% (hunk['type_name'],a,offset,num_relocs,hunk_num,f.tell())
return RESULT_INVALID_HUNK_FILE
offsets.append(offset)
reloc[hunk_num] = offsets
# padding
if total_words & 1 == 1:
self.read_word(f)
return RESULT_OK
def parse_symbol(self, f, hunk):
name_len = 1
symbols = []
hunk['symbols'] = symbols
while name_len > 0:
(name_len, name) = self.read_name(f)
if name_len < 0:
self.error_string = "%s has invalid symbol name" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
elif name_len == 0:
# last name occurred
break
value = self.read_long(f)
if value < 0:
self.error_string = "%s has invalid symbol value" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
symbols.append( (name,value) )
return RESULT_OK
def parse_debug(self, f, hunk):
num_longs = self.read_long(f)
if num_longs < 0:
self.error_string = "%s has invalid size" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
size = num_longs * 4
offset = self.read_long(f)
    hunk['debug_offset'] = offset
    tag = self.read_tag(f)
    hunk['debug_type'] = tag
size -= 8
if tag == 'LINE':
# parse LINE: source line -> code offset mapping
l = self.read_long(f)
size -= l * 4 + 4;
l,n = self.read_name_size(f,l)
src_map = []
hunk['src_file'] = n
hunk['src_map'] = src_map
while size > 0:
line_no = self.read_long(f)
offset = self.read_long(f)
size -= 8
src_map.append([line_no,offset])
else:
# read unknown DEBUG hunk
hunk['data'] = f.read(size)
return RESULT_OK
def find_first_code_hunk(self):
for hunk in self.hunks:
if hunk['type'] == HUNK_CODE:
return hunk
return None
def parse_overlay(self, f, hunk):
# read size of overlay hunk
ov_size = self.read_long(f)
if ov_size < 0:
self.error_string = "%s has invalid size" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
# read data of overlay
byte_size = (ov_size + 1) *4
ov_data = f.read(byte_size)
hunk['ov_data'] = ov_data
# check: first get header hunk
hdr_hunk = self.hunks[0]
if hdr_hunk['type'] != HUNK_HEADER:
self.error_string = "%s has no header hunk" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
# first find the code segment of the overlay manager
overlay_mgr_hunk = self.find_first_code_hunk()
if overlay_mgr_hunk == None:
self.error_string = "%s has no overlay manager hunk" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
# check overlay manager
overlay_mgr_data = overlay_mgr_hunk['data']
magic = self.get_long(overlay_mgr_data[4:8])
if magic != 0xabcd:
self.error_string = "no valid overlay manager magic found"
return RESULT_INVALID_HUNK_FILE
# check for standard overlay manager
magic2 = self.get_long(overlay_mgr_data[24:28])
magic3 = self.get_long(overlay_mgr_data[28:32])
magic4 = self.get_long(overlay_mgr_data[32:36])
std_overlay = (magic2 == 0x5ba0) and (magic3 == 0x074f7665) and (magic4 == 0x726c6179)
hunk['ov_std'] = std_overlay
return RESULT_OK
def parse_lib(self, f, hunk):
lib_size = self.read_long(f)
hunk['lib_file_offset'] = f.tell()
return RESULT_OK,lib_size * 4
def parse_index(self, f, hunk):
index_size = self.read_long(f)
total_size = index_size * 4
# first read string table
strtab_size = self.read_word(f)
strtab = f.read(strtab_size)
total_size -= strtab_size + 2
# read units
units = []
hunk['units'] = units
unit_no = 0
while total_size > 2:
# read name of unit
name_offset = self.read_word(f)
total_size -= 2
if name_offset == 0:
break
unit = {}
units.append(unit)
unit['unit_no'] = unit_no
unit_no += 1
# generate unit name
unit['name'] = self.get_index_name(strtab, name_offset)
# hunks in unit
hunk_begin = self.read_word(f)
num_hunks = self.read_word(f)
total_size -= 4
unit['hunk_begin_offset'] = hunk_begin
# for all hunks in unit
ihunks = []
unit['hunk_infos'] = ihunks
for a in xrange(num_hunks):
ihunk = {}
ihunks.append(ihunk)
# get hunk info
name_offset = self.read_word(f)
hunk_size = self.read_word(f)
hunk_type = self.read_word(f)
total_size -= 6
ihunk['name'] = self.get_index_name(strtab, name_offset)
ihunk['size'] = hunk_size
ihunk['type'] = hunk_type & 0x3fff
self.set_mem_flags(ihunk,hunk_type & 0xc000,14)
ihunk['type_name'] = hunk_names[hunk_type & 0x3fff]
# get references
num_refs = self.read_word(f)
total_size -= 2
if num_refs > 0:
refs = []
ihunk['refs'] = refs
for b in xrange(num_refs):
ref = {}
name_offset = self.read_word(f)
total_size -= 2
name = self.get_index_name(strtab, name_offset)
if name == '':
# 16 bit refs point to the previous zero byte before the string entry...
name = self.get_index_name(strtab, name_offset+1)
ref['bits'] = 16
else:
ref['bits'] = 32
ref['name'] = name
refs.append(ref)
# get definitions
num_defs = self.read_word(f)
total_size -= 2
if num_defs > 0:
defs = []
ihunk['defs'] = defs
for b in xrange(num_defs):
name_offset = self.read_word(f)
def_value = self.read_word(f)
def_type_flags = self.read_word(f)
def_type = def_type_flags & 0x3fff
def_flags = def_type_flags & 0xc000
total_size -= 6
name = self.get_index_name(strtab, name_offset)
d = { 'name':name, 'value':def_value,'type':def_type}
self.set_mem_flags(d,def_flags,14)
defs.append(d)
# align hunk
if total_size == 2:
self.read_word(f)
elif total_size != 0:
self.error_string = "%s has invalid padding" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
return RESULT_OK
def parse_ext(self, f, hunk):
ext_def = []
ext_ref = []
ext_common = []
hunk['ext_def'] = ext_def
hunk['ext_ref'] = ext_ref
hunk['ext_common'] = ext_common
ext_type_size = 1
while ext_type_size > 0:
# ext type | size
ext_type_size = self.read_long(f)
if ext_type_size < 0:
self.error_string = "%s has invalid size" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
ext_type = ext_type_size >> EXT_TYPE_SHIFT
ext_size = ext_type_size & EXT_TYPE_SIZE_MASK
# ext name
l,ext_name = self.read_name_size(f, ext_size)
if l < 0:
self.error_string = "%s has invalid name" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
elif l == 0:
break
# create local ext object
ext = { 'type' : ext_type, 'name' : ext_name }
# check and setup type name
if not ext_names.has_key(ext_type):
self.error_string = "%s has unspported ext entry %d" % (hunk['type_name'],ext_type)
return RESULT_INVALID_HUNK_FILE
ext['type_name'] = ext_names[ext_type]
# ext common
if ext_type == EXT_ABSCOMMON or ext_type == EXT_RELCOMMON:
ext['common_size'] = self.read_long(f)
ext_common.append(ext)
# ext def
elif ext_type == EXT_DEF or ext_type == EXT_ABS or ext_type == EXT_RES:
ext['def'] = self.read_long(f)
ext_def.append(ext)
# ext ref
else:
num_refs = self.read_long(f)
if num_refs == 0:
num_refs = 1
refs = []
for a in xrange(num_refs):
ref = self.read_long(f)
refs.append(ref)
ext['refs'] = refs
ext_ref.append(ext)
return RESULT_OK
def parse_unit_or_name(self, f, hunk):
l,n = self.read_name(f)
if l < 0:
self.error_string = "%s has invalid name" % (hunk['type_name'])
return RESULT_INVALID_HUNK_FILE
elif l > 0:
hunk['name'] = n
else:
hunk['name'] = ""
return RESULT_OK
def set_mem_flags(self, hunk, flags, shift):
f = flags >> shift
if f & 1 == 1:
hunk['memf'] = 'chip'
elif f & 2 == 2:
hunk['memf'] = 'fast'
else:
hunk['memf'] = ''
# ----- public functions -----
"""Read a hunk file and build internal hunk structure
Return status and set self.error_string on failure
"""
def read_file(self, hfile, v37_compat=None):
with open(hfile, "rb") as f:
return self.read_file_obj(hfile, f, v37_compat)
"""Read a hunk from memory"""
def read_mem(self, name, data, v37_compat=None):
fobj = StringIO.StringIO(data)
return self.read_file_obj(name, fobj, v37_compat)
def read_file_obj(self, hfile, f, v37_compat):
self.hunks = []
is_first_hunk = True
was_end = False
    was_potential_v37_hunk = False
was_overlay = False
self.error_string = None
lib_size = 0
last_file_offset = 0
while True:
hunk_file_offset = f.tell()
# read hunk type
hunk_raw_type = self.read_long(f)
if hunk_raw_type == -1 or hunk_raw_type == -2: # tolerate extra byte at end
if is_first_hunk:
self.error_string = "No valid hunk file: '%s' is empty" % (hfile)
return RESULT_NO_HUNK_FILE
else:
# eof
break
elif hunk_raw_type < 0:
if is_first_hunk:
self.error_string = "No valid hunk file: '%s' is too short" % (hfile)
return RESULT_NO_HUNK_FILE
else:
self.error_string = "Error reading hunk type @%08x" % (f.tell())
return RESULT_INVALID_HUNK_FILE
hunk_type = hunk_raw_type & HUNK_TYPE_MASK
hunk_flags = hunk_raw_type & HUNK_FLAGS_MASK
# check range of hunk type
if not hunk_names.has_key(hunk_type):
# no hunk file?
if is_first_hunk:
self.error_string = "No hunk file: '%s' type was %d" % (hfile, hunk_type)
return RESULT_NO_HUNK_FILE
elif was_end:
# garbage after an end tag is ignored
return RESULT_OK
        elif was_potential_v37_hunk:
# auto fix v37 -> reread whole file
f.seek(0)
return self.read_file_obj(hfile, f, True)
elif was_overlay:
# seems to be a custom overlay -> read to end of file
ov_custom_data = f.read()
self.hunks[-1]['custom_data'] = ov_custom_data
return RESULT_OK
else:
self.error_string = "Invalid hunk type %d/%x found at @%08x" % (hunk_type,hunk_type,f.tell())
return RESULT_INVALID_HUNK_FILE
else:
# check for valid first hunk type
if is_first_hunk and not self.is_valid_first_hunk_type(hunk_type):
self.error_string = "No hunk file: '%s' first hunk type was %d" % (hfile, hunk_type)
return RESULT_NO_HUNK_FILE
is_first_hunk = False
was_end = False
      was_potential_v37_hunk = False
was_overlay = False
hunk = { 'type' : hunk_type, 'hunk_file_offset' : hunk_file_offset }
self.hunks.append(hunk)
hunk['type_name'] = hunk_names[hunk_type]
self.set_mem_flags(hunk, hunk_flags, 30)
# account for lib
last_hunk_size = hunk_file_offset - last_file_offset
if lib_size > 0:
lib_size -= last_hunk_size
if lib_size > 0:
hunk['in_lib'] = True
# V37 fix?
if hunk_type == HUNK_DREL32:
# try to fix automatically...
if v37_compat == None:
            was_potential_v37_hunk = True
# fix was forced
elif v37_compat:
hunk_type = HUNK_RELOC32SHORT
hunk['fixes'] = 'v37'
# ----- HUNK_HEADER -----
if hunk_type == HUNK_HEADER:
result = self.parse_header(f,hunk)
# ----- HUNK_CODE/HUNK_DATA ------
elif hunk_type == HUNK_CODE or hunk_type == HUNK_DATA or hunk_type == HUNK_PPC_CODE:
result = self.parse_code_or_data(f,hunk)
# ---- HUNK_BSS ----
elif hunk_type == HUNK_BSS:
result = self.parse_bss(f,hunk)
# ----- HUNK_<reloc> -----
elif hunk_type == HUNK_RELRELOC32 or hunk_type == HUNK_ABSRELOC16 \
or hunk_type == HUNK_RELRELOC8 or hunk_type == HUNK_RELRELOC16 or hunk_type == HUNK_ABSRELOC32 \
or hunk_type == HUNK_DREL32 or hunk_type == HUNK_DREL16 or hunk_type == HUNK_DREL8 \
or hunk_type == HUNK_RELRELOC26:
result = self.parse_reloc(f,hunk)
# auto fix v37 bug?
if hunk_type == HUNK_DREL32 and result != RESULT_OK and v37_compat == None:
f.seek(0)
return self.read_file_obj(hfile, f, True)
# ---- HUNK_<reloc short> -----
elif hunk_type == HUNK_RELOC32SHORT:
result = self.parse_reloc_short(f,hunk)
# ----- HUNK_SYMBOL -----
elif hunk_type == HUNK_SYMBOL:
result = self.parse_symbol(f,hunk)
# ----- HUNK_DEBUG -----
elif hunk_type == HUNK_DEBUG:
result = self.parse_debug(f,hunk)
# ----- HUNK_END -----
elif hunk_type == HUNK_END:
was_end = True
result = RESULT_OK
# ----- HUNK_OVERLAY -----
elif hunk_type == HUNK_OVERLAY:
result = self.parse_overlay(f,hunk)
was_overlay = True
# ----- HUNK_BREAK -----
elif hunk_type == HUNK_BREAK:
result = RESULT_OK
# ----- HUNK_LIB -----
elif hunk_type == HUNK_LIB:
result,lib_size = self.parse_lib(f,hunk)
lib_size += 8 # add size of HUNK_LIB itself
# ----- HUNK_INDEX -----
elif hunk_type == HUNK_INDEX:
result = self.parse_index(f,hunk)
# ----- HUNK_EXT -----
elif hunk_type == HUNK_EXT:
result = self.parse_ext(f,hunk)
# ----- HUNK_UNIT -----
elif hunk_type == HUNK_UNIT or hunk_type == HUNK_NAME:
result = self.parse_unit_or_name(f,hunk)
# ----- oops! unsupported hunk -----
else:
self.error_string = "unsupported hunk %d" % (hunk_type)
return RESULT_UNSUPPORTED_HUNKS
# a parse error occurred
if result != RESULT_OK:
return result
last_file_offset = hunk_file_offset
return RESULT_OK
"""Return a list with all the hunk type names that were found
"""
def get_hunk_summary(self):
return self.get_struct_summary(self.hunks)
# ---------- Build Segments from Hunks ----------
def build_loadseg(self):
in_header = True
seek_begin = False
segment = None
segment_list = self.segments
for e in self.hunks:
hunk_type = e['type']
# check for end of header
if in_header and hunk_type in loadseg_valid_begin_hunks:
in_header = False
seek_begin = True
if in_header:
if hunk_type == HUNK_HEADER:
# we are in an overlay!
if self.overlay != None:
segment_list = []
self.overlay_segments.append(segment_list)
self.overlay_headers.append(e)
else:
# set load_seg() header
self.header = e
# start a new segment
segment = []
# setup hunk counter
hunk_no = e['first_hunk']
# we allow a debug hunk in header for SAS compatibility
elif hunk_type == HUNK_DEBUG:
segment.append(e)
else:
self.error_string = "Expected header in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
elif seek_begin:
# a new hunk shall begin
if hunk_type in loadseg_valid_begin_hunks:
segment = [e]
segment_list.append(segment)
seek_header = False
seek_begin = False
e['hunk_no'] = hunk_no
e['alloc_size'] = self.header['hunks'][hunk_no]['size']
hunk_no += 1
# add an extra overlay "hunk"
elif hunk_type == HUNK_OVERLAY:
# assume hunk to be empty
if self.overlay != None:
self.error_string = "Multiple overlay in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
self.overlay = e
self.overlay_headers = []
self.overlay_segments = []
in_header = True
# break
elif hunk_type == HUNK_BREAK:
# assume hunk to be empty
in_header = True
# broken hunk: multiple END or other hunks
elif hunk_type in [HUNK_END, HUNK_NAME, HUNK_DEBUG]:
pass
else:
self.error_string = "Expected hunk start in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
else:
# an extra block in hunk or end is expected
if hunk_type == HUNK_END:
seek_begin = True
# contents of hunk
elif hunk_type in loadseg_valid_extra_hunks or hunk_type == HUNK_DREL32:
segment.append(e)
# broken hunk file without END tag
elif hunk_type in loadseg_valid_begin_hunks:
segment = [e]
segment_list.append(segment)
seek_header = False
seek_begin = False
e['hunk_no'] = hunk_no
e['alloc_size'] = self.header['hunks'][hunk_no]['size']
hunk_no += 1
        # unexpected hunk?!
else:
self.error_string = "Unexpected hunk extra in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
return True
def build_unit(self):
force_unit = True
in_hunk = False
name = None
segment = None
unit = None
self.units = []
unit_no = 0
for e in self.hunks:
hunk_type = e['type']
# optional unit as first entry
if hunk_type == HUNK_UNIT:
unit = {}
unit['name'] = e['name']
unit['unit_no'] = unit_no
unit['segments'] = []
unit['unit'] = e
unit_no += 1
self.units.append(unit)
force_unit = False
hunk_no = 0
elif force_unit:
self.error_string = "Expected name hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
elif not in_hunk:
# begin a named hunk
if hunk_type == HUNK_NAME:
name = e['name']
# main hunk block
elif hunk_type in unit_valid_main_hunks:
segment = [e]
unit['segments'].append(segment)
# give main block the NAME
if name != None:
e['name'] = name
name = None
e['hunk_no'] = hunk_no
hunk_no += 1
in_hunk = True
# broken hunk: ignore multi ENDs
elif hunk_type == HUNK_END:
pass
else:
self.error_string = "Expected main hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
else:
# a hunk is finished
if hunk_type == HUNK_END:
in_hunk = False
# contents of hunk
elif hunk_type in unit_valid_extra_hunks:
segment.append(e)
        # unexpected hunk?!
else:
self.error_string = "Unexpected hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
return True
def build_lib(self):
self.libs = []
lib_segments = []
seek_lib = True
seek_main = False
for e in self.hunks:
hunk_type = e['type']
# seeking for a LIB hunk
if seek_lib:
if hunk_type == HUNK_LIB:
segment_list = []
lib_segments.append(segment_list)
seek_lib = False
seek_main = True
hunk_no = 0
# get start address of lib hunk in file
lib_file_offset = e['lib_file_offset']
else:
self.error_string = "Expected lib hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
elif seek_main:
# end of lib? -> index!
if hunk_type == HUNK_INDEX:
seek_main = False
seek_lib = True
lib_units = []
if not self.resolve_index_hunks(e, segment_list, lib_units):
self.error_string = "Error resolving index hunks!"
return False
lib = {}
lib['units'] = lib_units
lib['lib_no'] = len(self.libs)
lib['index'] = e
self.libs.append(lib)
# start of a hunk
elif hunk_type in unit_valid_main_hunks:
segment = [e]
e['hunk_no'] = hunk_no
hunk_no += 1
segment_list.append(segment)
seek_main = False
# calc relative lib address
hunk_lib_offset = e['hunk_file_offset'] - lib_file_offset
e['hunk_lib_offset'] = hunk_lib_offset
else:
self.error_string = "Expected main hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
else:
# end hunk
if hunk_type == HUNK_END:
seek_main = True
# extra contents
elif hunk_type in unit_valid_extra_hunks:
segment.append(e)
else:
self.error_string = "Unexpected hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type)
return False
return True
"""Resolve hunks referenced in the index"""
def resolve_index_hunks(self, index, segment_list, lib_units):
units = index['units']
no = 0
for unit in units:
lib_unit = {}
unit_segments = []
lib_unit['segments'] = unit_segments
lib_unit['name'] = unit['name']
lib_unit['unit_no'] = no
lib_unit['index_unit'] = unit
lib_units.append(lib_unit)
no += 1
# try to find segment with start offset
hunk_offset = unit['hunk_begin_offset']
found = False
for segment in segment_list:
hunk_no = segment[0]['hunk_no']
lib_off = segment[0]['hunk_lib_offset'] / 4 # is in longwords
if lib_off == hunk_offset:
# found segment
num_segs = len(unit['hunk_infos'])
for i in xrange(num_segs):
info = unit['hunk_infos'][i]
seg = segment_list[hunk_no+i]
unit_segments.append(seg)
# renumber hunk
seg[0]['hunk_no'] = i
seg[0]['name'] = info['name']
seg[0]['index_hunk'] = info
found = True
if not found:
return False
return True
"""From the hunk list build a set of segments that form the actual binary"""
def build_segments(self):
self.segments = []
if len(self.hunks) == 0:
self.type = TYPE_UNKNOWN
return False
# determine type of file from first hunk
first_hunk_type = self.hunks[0]['type']
if first_hunk_type == HUNK_HEADER:
self.type = TYPE_LOADSEG
return self.build_loadseg()
elif first_hunk_type == HUNK_UNIT:
self.type = TYPE_UNIT
return self.build_unit()
elif first_hunk_type == HUNK_LIB:
self.type = TYPE_LIB
return self.build_lib()
else:
self.type = TYPE_UNKNOWN
return False
"""Return a summary of the created segment structure"""
def get_segment_summary(self):
return self.get_struct_summary(self.segments)
def get_overlay_segment_summary(self):
if self.overlay_segments != None:
return self.get_struct_summary(self.overlay_segments)
else:
return None
def get_libs_summary(self):
if self.libs != None:
return self.get_struct_summary(self.libs)
else:
return None
def get_units_summary(self):
if self.units != None:
return self.get_struct_summary(self.units)
else:
return None
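# Minimal usage sketch (added for illustration; the path comes from the
# command line and RESULT_OK is provided by the Hunk import above):
if __name__ == '__main__':
  import sys
  reader = HunkReader()
  if reader.read_file(sys.argv[1]) != RESULT_OK:
    print "error:", reader.error_string
  elif reader.build_segments():
    print reader.get_segment_summary()
  else:
    print reader.get_hunk_summary()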
| bsd-2-clause | -663,237,826,273,252,900 | 30.197154 | 115 | 0.555052 | false |
hankcs/HanLP | hanlp/components/parsers/conll.py | 1 | 2534 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-26 15:37
from typing import Union
from hanlp.utils.io_util import get_resource, TimingFileIterator
from hanlp.utils.log_util import logger
def collapse_enhanced_empty_nodes(sent: list):
collapsed = []
for cells in sent:
if isinstance(cells[0], float):
id = cells[0]
head, deprel = cells[8].split(':', 1)
for x in sent:
arrows = [s.split(':', 1) for s in x[8].split('|')]
arrows = [(head, f'{head}:{deprel}>{r}') if h == str(id) else (h, r) for h, r in arrows]
arrows = sorted(arrows)
x[8] = '|'.join(f'{h}:{r}' for h, r in arrows)
sent[head][7] += f'>{cells[7]}'
else:
collapsed.append(cells)
return collapsed
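# Illustration (an assumed fragment, not from the source): given an empty
# node "8.1" whose enhanced-deps column reads "5:conj", a token pointing at
# "8.1:nsubj" is rewritten to point at head "5" as "5:conj>nsubj", so the
# empty node can be dropped while its dependency path survives on the head.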
def read_conll(filepath: Union[str, TimingFileIterator], underline_to_none=False, enhanced_collapse_empty_nodes=False):
sent = []
if isinstance(filepath, str):
filepath: str = get_resource(filepath)
if filepath.endswith('.conllu') and enhanced_collapse_empty_nodes is None:
enhanced_collapse_empty_nodes = True
src = open(filepath, encoding='utf-8')
else:
src = filepath
for idx, line in enumerate(src):
if line.startswith('#'):
continue
line = line.strip()
cells = line.split('\t')
if line and cells:
if enhanced_collapse_empty_nodes and '.' in cells[0]:
cells[0] = float(cells[0])
cells[6] = None
else:
if '-' in cells[0] or '.' in cells[0]:
# sent[-1][1] += cells[1]
continue
cells[0] = int(cells[0])
if cells[6] != '_':
try:
cells[6] = int(cells[6])
except ValueError:
cells[6] = 0
logger.exception(f'Wrong CoNLL format {filepath}:{idx + 1}\n{line}')
if underline_to_none:
for i, x in enumerate(cells):
if x == '_':
cells[i] = None
sent.append(cells)
else:
if enhanced_collapse_empty_nodes:
sent = collapse_enhanced_empty_nodes(sent)
yield sent
sent = []
if sent:
if enhanced_collapse_empty_nodes:
sent = collapse_enhanced_empty_nodes(sent)
yield sent
src.close()
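# Minimal usage sketch (added for illustration; 'sample.conllu' is a
# placeholder path):
if __name__ == '__main__':
    for sent in read_conll('sample.conllu'):
        for cells in sent:
            print(cells[0], cells[1])  # token id and surface form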
| apache-2.0 | 6,535,195,912,218,275,000 | 33.712329 | 119 | 0.500395 | false |
gautamMalu/XenInBox | pyanaconda/rescue.py | 1 | 18256 | #
# rescue.py - anaconda rescue mode setup
#
# Copyright (C) 2001, 2002, 2003, 2004 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Mike Fulbright <[email protected]>
# Jeremy Katz <[email protected]>
#
import sys
import os
from pyanaconda import iutil
import shutil
import time
import re
import subprocess
from snack import ButtonChoiceWindow, ListboxChoiceWindow,SnackScreen
from pyanaconda.constants import ANACONDA_CLEANUP
from pyanaconda.constants_text import TEXT_OK_BUTTON, TEXT_NO_BUTTON, TEXT_YES_BUTTON
from pyanaconda.text import WaitWindow, OkCancelWindow, ProgressWindow, PassphraseEntryWindow
from pyanaconda.flags import flags
from pyanaconda.installinterfacebase import InstallInterfaceBase
from pyanaconda.i18n import _
from blivet import mountExistingSystem
from blivet.errors import StorageError, DirtyFSError
from blivet.devices import LUKSDevice
from pykickstart.constants import KS_REBOOT, KS_SHUTDOWN
import meh.ui.text
import logging
log = logging.getLogger("anaconda")
class RescueInterface(InstallInterfaceBase):
def waitWindow(self, title, text):
return WaitWindow(self.screen, title, text)
def progressWindow(self, title, text, total, updpct = 0.05, pulse = False):
return ProgressWindow(self.screen, title, text, total, updpct, pulse)
def detailedMessageWindow(self, title, text, longText=None, ty="ok",
default=None, custom_icon=None,
custom_buttons=None, expanded=False):
return self.messageWindow(title, text, ty, default, custom_icon,
custom_buttons)
def messageWindow(self, title, text, ty = "ok", default = None,
custom_icon=None, custom_buttons=None):
if custom_buttons is None:
custom_buttons = []
if ty == "ok":
ButtonChoiceWindow(self.screen, title, text,
buttons=[TEXT_OK_BUTTON])
elif ty == "yesno":
if default and default == "no":
btnlist = [TEXT_NO_BUTTON, TEXT_YES_BUTTON]
else:
btnlist = [TEXT_YES_BUTTON, TEXT_NO_BUTTON]
rc = ButtonChoiceWindow(self.screen, title, text,
buttons=btnlist)
if rc == "yes":
return 1
else:
return 0
elif ty == "custom":
tmpbut = []
for but in custom_buttons:
tmpbut.append(but.replace("_",""))
rc = ButtonChoiceWindow(self.screen, title, text, width=60,
buttons=tmpbut)
idx = 0
for b in tmpbut:
if b.lower() == rc:
return idx
idx = idx + 1
return 0
else:
return OkCancelWindow(self.screen, title, text)
def passphraseEntryWindow(self, device):
w = PassphraseEntryWindow(self.screen, device)
passphrase = w.run()
w.pop()
return passphrase
@property
def meh_interface(self):
return self._meh_interface
@property
def tty_num(self):
return 1
def shutdown (self):
self.screen.finish()
def suspend(self):
pass
def resume(self):
pass
def __init__(self):
InstallInterfaceBase.__init__(self)
self.screen = SnackScreen()
self._meh_interface = meh.ui.text.TextIntf()
def makeFStab(instPath = ""):
if os.access("/proc/mounts", os.R_OK):
f = open("/proc/mounts", "r")
buf = f.read()
f.close()
else:
buf = ""
try:
f = open(instPath + "/etc/fstab", "a")
if buf:
f.write(buf)
f.close()
except IOError as e:
log.info("failed to write /etc/fstab: %s", e)
# make sure they have a resolv.conf in the chroot
def makeResolvConf(instPath):
if flags.imageInstall:
return
if not os.access("/etc/resolv.conf", os.R_OK):
return
if os.access("%s/etc/resolv.conf" %(instPath,), os.R_OK):
f = open("%s/etc/resolv.conf" %(instPath,), "r")
buf = f.read()
f.close()
else:
buf = ""
# already have a nameserver line, don't worry about it
if buf.find("nameserver") != -1:
return
f = open("/etc/resolv.conf", "r")
buf = f.read()
f.close()
# no nameserver, we can't do much about it
if buf.find("nameserver") == -1:
return
shutil.copyfile("%s/etc/resolv.conf" %(instPath,),
"%s/etc/resolv.conf.bak" %(instPath,))
f = open("%s/etc/resolv.conf" %(instPath,), "w+")
f.write(buf)
f.close()
def runShell(screen = None, msg=""):
if screen:
screen.suspend()
print
if msg:
print (msg)
if flags.imageInstall:
print(_("Run %s to unmount the system when you are finished.")
% ANACONDA_CLEANUP)
else:
print(_("When finished please exit from the shell and your "
"system will reboot."))
print
proc = None
if os.path.exists("/usr/bin/firstaidkit-qs"):
proc = subprocess.Popen(["/usr/bin/firstaidkit-qs"])
proc.wait()
if proc is None or proc.returncode!=0:
if os.path.exists("/bin/bash"):
iutil.execConsole()
else:
print(_("Unable to find /bin/sh to execute! Not starting shell"))
time.sleep(5)
if screen:
screen.finish()
def _exception_handler_wrapper(orig_except_handler, screen, *args):
"""
Helper function that wraps the exception handler with snack shutdown.
:param orig_except_handler: original exception handler that should be run
after the wrapping changes are done
:type orig_except_handler: exception handler as can be set as sys.excepthook
:param screen: snack screen that should be shut down before further actions
:type screen: snack screen
"""
screen.finish()
return orig_except_handler(*args)
def _unlock_devices(intf, storage):
try_passphrase = None
for device in storage.devices:
if device.format.type == "luks":
skip = False
unlocked = False
while not (skip or unlocked):
if try_passphrase is None:
passphrase = intf.passphraseEntryWindow(device.name)
else:
passphrase = try_passphrase
if passphrase is None:
# canceled
skip = True
else:
device.format.passphrase = passphrase
try:
device.setup()
device.format.setup()
luks_dev = LUKSDevice(device.format.mapName,
parents=[device],
exists=True)
storage.devicetree._addDevice(luks_dev)
storage.devicetree.populate()
unlocked = True
                        # try to use the same passphrase for other devices
try_passphrase = passphrase
except StorageError as serr:
log.error("Failed to unlock %s: %s", device.name, serr)
device.teardown(recursive=True)
device.format.passphrase = None
try_passphrase = None
def doRescue(intf, rescue_mount, ksdata):
import blivet
# XXX: hook the exception handler wrapper that turns off snack first
orig_hook = sys.excepthook
sys.excepthook = lambda ty, val, tb: _exception_handler_wrapper(orig_hook,
intf.screen,
ty, val, tb)
for f in [ "services", "protocols", "group", "joe", "man.config",
"nsswitch.conf", "selinux", "mke2fs.conf" ]:
try:
os.symlink('/mnt/runtime/etc/' + f, '/etc/' + f)
except OSError:
pass
# Early shell access with no disk access attempts
if not rescue_mount:
# the %post should be responsible for mounting all needed file systems
# NOTE: 1st script must be bash or simple python as nothing else might be available in the rescue image
if flags.automatedInstall and ksdata.scripts:
from pyanaconda.kickstart import runPostScripts
runPostScripts(ksdata.scripts)
else:
runShell()
sys.exit(0)
if flags.automatedInstall:
readOnly = ksdata.rescue.romount
else:
# prompt to see if we should try and find root filesystem and mount
# everything in /etc/fstab on that root
while True:
rc = ButtonChoiceWindow(intf.screen, _("Rescue"),
_("The rescue environment will now attempt to find your "
"Linux installation and mount it under the directory "
"%s. You can then make any changes required to your "
"system. If you want to proceed with this step choose "
"'Continue'. You can also choose to mount your file systems "
"read-only instead of read-write by choosing 'Read-Only'. "
"\n\n"
"If for some reason this process fails you can choose 'Skip' "
"and this step will be skipped and you will go directly to a "
"command shell.\n\n") % (iutil.getSysroot(),),
[_("Continue"), _("Read-Only"), _("Skip")] )
if rc == _("Skip").lower():
runShell(intf.screen)
sys.exit(0)
else:
readOnly = rc == _("Read-Only").lower()
break
sto = blivet.Blivet(ksdata=ksdata)
blivet.storageInitialize(sto, ksdata, [])
_unlock_devices(intf, sto)
roots = blivet.findExistingInstallations(sto.devicetree)
if not roots:
root = None
elif len(roots) == 1:
root = roots[0]
else:
height = min (len (roots), 12)
if height == 12:
scroll = 1
else:
scroll = 0
lst = []
for root in roots:
lst.append("%s" % root.name)
(button, choice) = \
ListboxChoiceWindow(intf.screen, _("System to Rescue"),
_("Which device holds the root partition "
"of your installation?"), lst,
[ _("OK"), _("Exit") ], width = 30,
scroll = scroll, height = height,
help = "multipleroot")
if button == _("Exit").lower():
root = None
else:
root = roots[choice]
rootmounted = False
if root:
try:
if not flags.imageInstall:
msg = _("The system will reboot automatically when you exit "
"from the shell.")
else:
msg = _("Run %s to unmount the system "
"when you are finished.") % ANACONDA_CLEANUP
try:
mountExistingSystem(sto.fsset, root.device,
allowDirty = True,
readOnly = readOnly)
except DirtyFSError:
if flags.automatedInstall:
log.error("System had dirty file systems which you chose not to mount")
else:
ButtonChoiceWindow(intf.screen, _("Rescue"),
_("Your system had dirty file systems which you chose not "
"to mount. Press return to get a shell from which "
"you can fsck and mount your partitions. %s") % msg,
[_("OK")], width = 50)
rootmounted = False
else:
if flags.automatedInstall:
log.info("System has been mounted under: %s", iutil.getSysroot())
else:
ButtonChoiceWindow(intf.screen, _("Rescue"),
_("Your system has been mounted under %(rootPath)s.\n\n"
"Press <return> to get a shell. If you would like to "
"make your system the root environment, run the command:\n\n"
"\tchroot %(rootPath)s\n\n%(msg)s") %
{'rootPath': iutil.getSysroot(),
'msg': msg},
[_("OK")] )
rootmounted = True
# now turn on swap
if not readOnly:
try:
sto.turnOnSwap()
except StorageError:
log.error("Error enabling swap")
# and selinux too
if flags.selinux:
# we have to catch the possible exception
# because we support read-only mounting
try:
fd = open("%s/.autorelabel" % iutil.getSysroot(), "w+")
fd.close()
except IOError:
log.warning("cannot touch /.autorelabel")
# set a library path to use mounted fs
libdirs = os.environ.get("LD_LIBRARY_PATH", "").split(":")
mounted = map(lambda dir: "/mnt/sysimage%s" % dir, libdirs)
os.environ["LD_LIBRARY_PATH"] = ":".join(libdirs + mounted)
# find groff data dir
gversion = None
try:
glst = os.listdir("/mnt/sysimage/usr/share/groff")
except OSError:
pass
else:
                # find a directory which is a numeral; it's where
                # the data files are
for gdir in glst:
if re.match(r'\d[.\d]+\d$', gdir):
gversion = gdir
break
if gversion is not None:
gpath = "/mnt/sysimage/usr/share/groff/"+gversion
os.environ["GROFF_FONT_PATH"] = gpath + '/font'
os.environ["GROFF_TMAC_PATH"] = "%s:/mnt/sysimage/usr/share/groff/site-tmac" % (gpath + '/tmac',)
# do we have bash?
try:
if os.access("/usr/bin/bash", os.R_OK):
os.symlink ("/usr/bin/bash", "/bin/bash")
except OSError:
pass
except (ValueError, LookupError, SyntaxError, NameError):
raise
except Exception as e: # pylint: disable=W0703
log.error("doRescue caught exception: %s", e)
if flags.automatedInstall:
log.error("An error occurred trying to mount some or all of your system")
else:
if not flags.imageInstall:
msg = _("The system will reboot automatically when you "
"exit from the shell.")
else:
msg = _("Run %s to unmount the system "
"when you are finished.") % ANACONDA_CLEANUP
ButtonChoiceWindow(intf.screen, _("Rescue"),
_("An error occurred trying to mount some or all of your "
"system. Some of it may be mounted under %s.\n\n"
"Press <return> to get a shell.") % iutil.getSysroot() + msg,
[_("OK")] )
else:
if flags.automatedInstall and ksdata.reboot.action in [KS_REBOOT, KS_SHUTDOWN]:
log.info("No Linux partitions found")
intf.screen.finish()
print(_("You don't have any Linux partitions. Rebooting.\n"))
sys.exit(0)
else:
if not flags.imageInstall:
msg = _(" The system will reboot automatically when you exit "
"from the shell.")
else:
msg = ""
ButtonChoiceWindow(intf.screen, _("Rescue Mode"),
_("You don't have any Linux partitions. Press "
"return to get a shell.%s") % msg,
[ _("OK") ], width = 50)
msgStr = ""
if rootmounted and not readOnly:
sto.makeMtab()
try:
makeResolvConf(iutil.getSysroot())
except (OSError, IOError) as e:
log.error("error making a resolv.conf: %s", e)
msgStr = _("Your system is mounted under the %s directory.") % iutil.getSysroot()
ButtonChoiceWindow(intf.screen, _("Rescue"), msgStr, [_("OK")] )
    # we do not need ncurses anymore, shut it down
    intf.shutdown()
    # create /etc/fstab in the ramdisk, so it is easier to work with RO mounted filesystems
makeFStab()
# run %post if we've mounted everything
if rootmounted and not readOnly and flags.automatedInstall:
from pyanaconda.kickstart import runPostScripts
runPostScripts(ksdata.scripts)
# start shell if reboot wasn't requested
if not flags.automatedInstall or not ksdata.reboot.action in [KS_REBOOT, KS_SHUTDOWN]:
runShell(msg=msgStr)
sys.exit(0)
| gpl-2.0 | 1,907,631,276,107,746,300 | 36.030426 | 117 | 0.526731 | false |