code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
---|---|---|---|---|---|
# -*- coding: UTF-8 -*-
import sys
WorkList = None
def SH(i):
"""reformatting .SH"""
global WorkList
string = WorkList[i]
l = len(string) - 2
r = 0
while string[0] == '=' and string[l] == '=':
WorkList[i] = string[1:l]
string = WorkList[i]
l = len(string) - 1
r = r + 1
if r == 2:
WorkList[i] = '\n.SH "' + string + '"\n.PP\n'
else:
WorkList[i] = '\n.SS "' + string + '"\n.PP\n'
#---------------------------------------------------------------------------
def TP(i):
"""reformatting .TP"""
global WorkList
string = WorkList[i]
l=0
string1 = WorkList[i + l]
while string1 != '' and string1[0] == ';':
j=0
finish = 0
nexcl = 1
s = 0
while len(string) > j and finish == 0:
if string[j:j+8] == '<nowiki>':
nexcl = 0
j = j + 7
elif string[j:j+9] == '</nowiki>':
nexcl = 1
j = j + 8
elif string[j:j+4] == '<!--':
nexcl = 0
j = j + 3
elif string[j:j+3] == '-->':
nexcl = 1
j = j + 2
if string[j] == ':':
s = 1
finish = nexcl * s
s = 0
j = j + 1
if len(string) == j:
WorkList[i] = '.TP\n.B ' + string[1:]
elif string[j-1] == ':':
WorkList[i] = '.TP\n.B ' + string[1:j-1] + '\n' + string[j:]
l = l + 1
string1 = WorkList[i+l]
while string1 != '' and string1[0] == ':' and string1[1] <> ':' and string1[1] <> ';':
WorkList[i + l] = '.br\n' + string1[1:]
l = l + 1
string1 = WorkList[i + l]
#---------------------------------------------------------------------------
def wiki2man(content):
global WorkList
string = '\n'
string = unicode(string, 'utf-8')
WorkList = [string]
cpt = 0
while string != '' and cpt < len(content):
string = content[cpt]
cpt += 1
WorkList.append(string)
path = sys.argv[0]
n = len(path)
n = n - 11
path = path[:n]
########## Reformatting from wiki to roff ##########
# TH:
string = WorkList[1];
if len(string) > 2 and string[0] != '=' and string[:4] != '<!--' and string[:2] != '{{':
i = 0
while len(string) > i and string[i] != '(':
i = i + 1
WorkList.pop(1)
WorkList.pop(0)
i = 0
tabacc = -1
tab = 0
tab2 = 0
col = 0
nf = 0
nr = 0
excl = 0
nowiki = 0
RS=0
strng = unicode('{{MAN индекс}}', 'utf-8')
while len(WorkList) > i:
string = WorkList[i]
if len(string) > 1:
# reformatting "nowiki"
if string[:9] == '</nowiki>':
WorkList[i] = string[9:]
nowiki = 0
if nowiki == 0:
# reformatting "pre"
if string[:6] == '</pre>':
WorkList[i] = '\n.fi\n.RE\n' + string[6:]
nf = 0
# reformatting "tt"
elif string[:5] == '</tt>':
if string[6:7] == '. ':
WorkList[i] = '\n.fi\n.RE\n' + string[7:]
elif len(string) > 6 and string[6] == '.':
WorkList[i] = '\n.fi\n.RE\n' + string[6:]
else:
WorkList[i] = '\n.fi\n.RE\n' + string[5:]
nf = 0
# reformatting " "
if string[0] == ' ':
if nf == 0:
nf = 1
WorkList[i] = '\n.RS\n.nf\n' + string
elif nf == 1:
WorkList[i] = string
else:
if nf == 1:
nf = 0
WorkList[i] = '\n.fi\n.RE\n'
WorkList.insert(i+1, string)
string = WorkList[i]
if nf != 2 and nowiki == 0:
# reformatting excluded text <!-- * -->
if excl == 1:
WorkList[i] = '.\" ' + string[0:]
string = WorkList[i]
if nf == 0:
# format titles
if string[0] == '=' and string[len(string)-2] == '=':
SH(i)
# format ";"
elif string[0] == ';':
TP(i)
# format ":..."
elif string[0] == ':':
l = 1
s = ''
while string[l] == ':':
l = l + 1;
if RS == l:
s = '\n.br\n'
elif RS < l:
while RS < l:
s = s + '.RS\n'
RS = RS + 1
if string[RS] == ';':
WorkList[i] = s + '.TP\n.B ' + string[RS+1:]
else:
WorkList[i] = s + string[RS:]
string = WorkList[i]
stri = WorkList[i+1]
if RS > 0 and stri[0] <> ':':
while RS > 0:
WorkList[i] = string + '\n.RE\n'
RS = RS - 1
string = WorkList[i]
else:
while RS > 0 and len(stri) > RS-1 and stri[RS-1] <> ':':
RS = RS - 1
WorkList[i] = string + '\n.RE\n'
string = WorkList[i]
# format "*..."
elif string[0] == '*':
WorkList[i] = '.br\n * ' + string[1:]
# format tables 2
elif string[:2] == '{|':
if tab2 > 0:
WorkList[i] = '.RS\n'
tab2 = tab2 + 1
col = 0
else:
WorkList[i] = ''
tab2 = 1
elif string[:2] == '|-' and tab2 > 0:
WorkList[i] = ''
col = 0
elif string[:2] == '|}':
if tab2 == 1:
WorkList[i] = ''
col = 0
tab2 = 0
elif tab2 > 1:
WorkList[i] = '\n.RE\n'
col = 0
tab2 = tab2 - 1
elif string[:8] == '|valign=' and tab2 > 0:
j = 9
while len(string) > j and string[j]!='|':
j = j + 1
if string[j] == '|':
if col == 0:
WorkList[i] = '\n.TP\n' + string[j+1:]
col = 1
elif col > 0:
WorkList[i] = string[j+1:]
col = 2
elif col > 1:
WorkList[i] = '.PP\n' + string[j+1:]
col = col + 1
elif string[:1] == '|' and tab2 > 0:
if col == 0:
WorkList[i] = '\n.TP\n' + string[1:]
col = 1
elif col == 1:
WorkList[i] = string[1:]
col = col + 1
elif col > 1:
WorkList[i] = '\n' + string[1:]
col = col + 1
# delete wiki "Category:"
elif string[:11] == '[[Category:':
WorkList[i] = ''
# delete wiki {{MAN индекс}}
elif string[:14] == strng:
WorkList[i] = ''
# delete wiki [[en:Man ...]]
elif string[:9] == '[[en:Man ':
WorkList[i] = ''
string = WorkList[i]
j = 0
B = -1
I = -1
U = -1
K = -1
K1 = -1
while len(string) > j:
# reformatting excluded text <!-- * -->
if string[j:j+4] == '<!--':
string = string[:j] + '\n.\"' + string[j+4:]
excl = 1
j = j + 1
elif string[j:j+3] == '-->':
string = string[:j] + '\n' + string[j+3:]
excl = 0
j = j - 1
if excl == 0:
# Change some symbols: — « » — © " & < >
if string[j:j+8] == '―':
string = string[:j] + unicode('—', 'utf-8') + string[j+8:]
elif string[j:j+7] == '«':
string = string[:j] + unicode('«', 'utf-8') + string[j+7:]
elif string[j:j+7] == '»':
string = string[:j] + unicode('»', 'utf-8') + string[j+7:]
elif string[j:j+7] == '—':
string = string[:j] + unicode('—', 'utf-8') + string[j+7:]
elif string[j:j+6] == '©':
string = string[:j] + unicode('©', 'utf-8') + string[j+6:]
elif string[j:j+6] == '"':
string = string[:j] + unicode('"', 'utf-8') + string[j+6:]
elif string[j:j+6] == ' ':
string = string[:j] + unicode(' ', 'utf-8') + string[j+6:]
elif string[j:j+5] == '&':
string = string[:j] + unicode('&', 'utf-8') + string[j+5:]
elif string[j:j+4] == '<':
string = string[:j] + unicode('<', 'utf-8') + string[j+4:]
elif string[j:j+4] == '>':
string = string[:j] + unicode('>', 'utf-8') + string[j+4:]
# reformatting "-" or "\"
elif string[j:j+1] == '-':
string = string[0:j] + '\\' + string[j:]
j = j + 1
elif string[j:j+1] == '\\':
string = string[0:j] + '\e' + string[j+1:]
j = j + 1
# reformatting "nowiki"
elif string[j:j+8] == '<nowiki>':
nowiki = 1
if nf != 2:
string = string[:j] + string[j+8:]
j = j
elif string[j:j+9] == '</nowiki>':
nowiki = 0
if nf != 2:
string = string[:j] + string[j+9:]
j = j
if nowiki == 0:
if string[j:j+5] == "'''''":
if B != -1 and I == -1 :
if tabacc == 1:
string = string[:B] + '"' + string[B+3:j] + '"' + string[j+3:]
j = j - 4
B =- 1
else:
string = string[:B] + '\\fB' + string[B+3:j] + '\\fR' + string[j+3:]
j = j + 1
B =- 1
if I != -1 and B == -1:
string = string[:I] + '\\fI' + string[I+2:j] + '\\fR' + string[j+2:]
j = j + 2
I =- 1
# reformatting boolean text 1
elif string[j:j+3] == "'''":
if B == -1:
B = j
else:
if tabacc == 1:
string = string[:B] + '"' + string[B+3:j] + '"' + string[j+3:]
j = j - 4
B =- 1
elif j+3-B > 5:
string = string[:B] + '\\fB' + string[B+3:j] + '\\fR' + string[j+3:]
j = j + 1
B =- 1
# reformatting italic text 1
elif string[j:j+2] == "''" and B == -1:
if I == -1:
I = j
else:
if j+3-I > 2:
string = string[:I] + '\\fI' + string[I+2:j] + '\\fR' + string[j+2:]
j = j + 2
I =- 1
# reformatting "pre"
elif string[j:j+5] == '<pre>':
string = string[:j] + '\n.RS\n.nf\n' + string[j+5:]
nf = 2
j = j + 3
elif string[j:j+6] == '</pre>':
string = string[:j] + '\n.fi\n.RE\n' + string[j+6:]
nf = 0
j = j + 3
# reformatting "code"
elif string[j:j+6] == '<code>':
string = string[:j] + '\n.nf\n' + string[j+6:]
nf = 2
j = j + 3
elif string[j:j+7] == '</code>':
string = string[:j] + '\n.fi\n' + string[j+7:]
nf = 0
j = j + 3
# reformatting "tt"
elif string[j:j+4] == '<tt>':
string = string[:j] + '\n.RS\n.nf\n' + string[j+4:]
nf = 2
j = j + 3
elif string[j:j+5] == '</tt>':
if string[j+5] == '.':
string = string[:j] + '\n.fi\n.RE\n' + string[j+6:]
else:
string = string[:j] + '\n.fi\n.RE\n' + string[j+5:]
nf = 0
j = j + 3
# reformatting "...}}"
elif string[j:j+2] == '}}':
if nr == 1:
string = string[:j] + '\\fR' + string[j+2:]
nr = 0
j = j + 2
elif nr == 2:
string = string[:j] + '\n.RE\n' + string[j+2:]
nr = 0
j = j + 3
# reformatting "{{Codeline|...}}"
elif string[j:j+11] == '{{Codeline|':
string = string[:j] + '\\fB' + string[j+11:]
nr = 1
j = j + 2
# reformatting "{{Warning|...}}"
elif string[j:j+10] == '{{Warning|':
string = string[:j] + '\\fB' + string[j+10:]
nr = 1
j = j + 2
# reformatting "{{Note|...}}"
elif string[j:j+7] == '{{Note|':
string = string[:j] + '\\fI' + string[j+7:]
nr = 1
j = j + 2
# reformatting "{{Discussion|...}}"
elif string[j:j+13] == '{{Discussion|':
string = string[:j] + '\\fI' + string[j+13:]
nr = 1
j = j + 2
# reformatting "{{Filename|...}}"
elif string[j:j+11] == '{{Filename|':
string = string[:j] + '\\fI' + string[j+11:]
nr = 1
j = j + 2
# reformatting "[mailto:...]"
elif string[j:j+8] == '[mailto:':
a = j + 8
while string[a] <> ' ':
a = a + 1
b = a + 1
while string[b] <> ']':
b = b + 1
string = string[:j] + string[a+1:b] + ' <' + string[j+8:a] + '>'
# reformatting "{{Box File|...|...}}"
elif string[j:j+11] == '{{Box File|':
a = j + 11
while string[a] <> '|':
a = a + 1
string = string[:j] + '\n.TP\n.B ' + string[j+11:a] + '\n.RS\n' + string[a+1:]
nr = 2
if nf == 0:
# reformatting boolean text 2
if string[j:j+3] == '<b>':
string = string[:j] + '\\fB' + string[j+3:]
j = j + 2
elif string[j:j+4] == '</b>':
string = string[:j] + '\\fR' + string[j+4:]
j = j + 2
# reformatting italic text 2
elif string[j:j+3] == '<i>':
string = string[:j] + '\\fI' + string[j+3:]
j = j + 2
elif string[j:j+4] == '</i>':
string = string[:j] + '\\fR' + string[j+4:]
j = j + 2
# format underlined text
elif string[j:j+3] == '<u>':
U = j
elif string[j:j+4] == '</u>' and U != -1:
string = string[:U] + '\\fB\\fI' + string[U+3:j] + '\\fB\\fR' + string[j+4:]
j = j + 7
U =- 1
# brake line 1
elif string[j:j+4] == '<br>':
string = string[0:j] + '\n.br\n' + string[j+4:]
j = j + 2
# brake line 2
elif string[j:j+6] == '<br />':
string = string[0:j] + '\n.PP\n' + string[j+6:]
j = j + 2
# format tables 1
elif string[j:j+6] == '<table':
tab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:tab] + string[j+1:]
j = tab - 1
tab = 1
else:
j = tab
tab = 0
elif string[j:j+3] == '<tr':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
tabacc = 0
string = string[:Ktab] + '\n.SS ' + string[j+1:]
j = Ktab + 4
else:
j = Ktab
elif string[j:j+4] == '</tr':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
tabacc =- 1
string = string[:Ktab] + string[j+1:]
j = Ktab - 1
else:
j = Ktab
elif string[j:j+3] == '<td':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
tabacc = tabacc + 1
if tabacc == 1:
string = string[:Ktab] + string[j+1:]
j = Ktab - 1
else:
string = string[:Ktab] + '\n.PP\n' + string[j+1:]
j = Ktab + 3
else:
j = Ktab
elif string[j:j+4] == '</td':
Ktab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:Ktab] + string[j+1:]
j = Ktab - 1
else:
j = Ktab
elif string[j:j+7] == '</table':
tab = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:tab] + string[j+1:]
j = tab - 1
tab = 0
else:
j = tab
tab = 1
# format table 2 {| |- | || |}
elif string[j:j+2] == '||' and tab2 > 0 and col > 0:
string = string[:j] + '\n' + string[j+2:]
col = col + 1
# format div????
elif string[j:j+4] == '<div':
div = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:div] + string[j+1:]
j = div - 1
else:
j = div
elif string[j:j+5] == '</div':
div = j
while len(string) > j and string[j] != '>':
j = j + 1
if string[j] == '>':
string = string[:div] + string[j+1:]
j = div - 1
else:
j = div
# format internal links
elif string[j:j+2] == '[[':
K = j
elif string[j] == '|':
if K != -1:
K1 = j
elif string[j:j+2] == ']]':
if K != -1 and K1 != -1:
string = string[:K] + string[K1+1:j] + string[j+2:]
j = j - K1 + K - 2
K =- 1
K1 =- 1
elif K != -1 and K1 == -1:
string = string[:K] + string[K+2:j] + string[j+2:]
j = j - 4
K =- 1
j = j + 1
WorkList[i] = string
i = i + 1
# Make title .TH
string = '\n'
string = string.encode('utf-8')
string = unicode(string, 'utf-8')
WorkList.insert(0, string)
########## Output roff formatted file ##########
# Output encoded symbols:
string = ''
for i in range(len(WorkList)):
string = string + WorkList[i]
# Delete empty lines and some think else..., just for making roff code better:
i = 0
while len(string) > i:
if string[i:i+8] == '.RE\n\n.RS':
string = string[:i+3] + string[i+4:]
if string[i:i+8] == '.RE\n\n.br':
string = string[:i+3] + string[i+4:]
if string[i:i+6] == '\n.SS\n':
string = string[:i+5] + string[i+6:]
if string[i:i+5] == '\n\n.RE':
string = string[:i+1] + string[i+2:]
if string[i:i+5] == '\n\n\n\n\n':
string = string[:i] + string[i+3:]
if string[i:i+4] == '\n\n\n\n':
string = string[:i] + string[i+2:]
if string[i:i+3] == '\n\n\n':
string = string[:i] + string[i+1:]
i = i + 1
return string
#---------------------------------------------------------------------------
| franck-talbart/codelet_tuning_infrastructure | ctr-common/plugins/4e7420cd-904e-4c2a-b08f-02c867ba4cd8/wiki2man.py | Python | gpl-3.0 | 27,388 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Copyright 2012 Unknown <diogo@arch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
try:
from process.sequence import Alignment
from base.plotter import bar_plot, multi_bar_plot
from process.error_handling import KillByUser
except ImportError:
from trifusion.process.sequence import Alignment
from trifusion.base.plotter import bar_plot, multi_bar_plot
from trifusion.process.error_handling import KillByUser
from collections import OrderedDict, Counter
import pickle
import os
import sqlite3
from os.path import join
import random
import string
import copy
class Cluster(object):
""" Object for clusters of the OrthoMCL groups file. It is useful to set a
number of attributes that will make subsequent filtration and
processing much easier """
def __init__(self, line_string):
"""
To initialize a Cluster object, only a string compliant with the
format of a cluster in an OrthoMCL groups file has to be provided.
This line should contain the name of the group, a colon, and the
sequences belonging to that group separated by whitespace
:param line_string: String of a cluster
"""
# Initializing attributes for parse_string
self.name = None
self.sequences = None
self.species_frequency = {}
# Initializing attributes for apply filter
# If the value is different than None, this will inform downstream
# objects of whether this cluster is compliant with the specified
# gene_threshold
self.gene_compliant = None
# If the value is different than None, this will inform downstream
# objects of whether this cluster is compliant with the specified
# species_threshold
self.species_compliant = None
self.parse_string(line_string)
def parse_string(self, cluster_string):
"""
Parses the string and sets the group name and sequence list attributes
"""
fields = cluster_string.split(":")
# Setting the name and sequence list of the clusters
self.name = fields[0].strip()
self.sequences = fields[1].strip().split()
# Setting the gene frequency for each species in the cluster
self.species_frequency = Counter([field.split("|")[0] for field in
self.sequences])
def remove_taxa(self, taxa_list):
"""
Removes the taxa contained in taxa_list from self.sequences and
self.species_frequency
:param taxa_list: list, each element should be a taxon name
"""
self.sequences = [x for x in self.sequences if x.split("|")[0]
not in taxa_list]
self.species_frequency = dict((taxon, val) for taxon, val in
self.species_frequency.items()
if taxon not in taxa_list)
def apply_filter(self, gene_threshold, species_threshold):
"""
This method will update two Cluster attributes, self.gene_flag and
self.species_flag, which will inform downstream objects if this
cluster respects the gene and species threshold
:param gene_threshold: Integer for the maximum number of gene copies
per species
:param species_threshold: Integer for the minimum number of species
present
"""
# Check whether cluster is compliant with species_threshold
if len(self.species_frequency) >= species_threshold and \
species_threshold:
self.species_compliant = True
else:
self.species_compliant = False
# Check whether cluster is compliant with gene_threshold
if max(self.species_frequency.values()) <= gene_threshold and \
gene_threshold:
self.gene_compliant = True
else:
self.gene_compliant = False
class OrthoGroupException(Exception):
pass
class GroupLight(object):
"""
Analogous to Group object but with several changes to reduce memory usage
"""
def __init__(self, groups_file, gene_threshold=None,
species_threshold=None, ns=None):
self.gene_threshold = gene_threshold if gene_threshold else None
self.species_threshold = species_threshold if species_threshold \
else None
# Attribute containing the list of included species
self.species_list = []
# Attribute that will contain taxa to be excluded from analyses
self.excluded_taxa = []
self.species_frequency = []
# Attributes that will store the number (int) of cluster after gene and
# species filter
self.all_clusters = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
self.all_compliant = 0
# Attribute containing the total number of sequences
self.total_seqs = 0
# Attribute containing the maximum number of extra copies found in the
# clusters
self.max_extra_copy = 0
# Attribute with name of the group file, which will be an ID
self.name = os.path.abspath(groups_file)
self.table = groups_file.split(os.sep)[-1].split(".")[0]
# Initialize atribute containing the groups filtered using the gene and
# species threshold. This attribute can be updated at any time using
# the update_filtered_group method
self.filtered_groups = []
self._parse_groups(ns)
if type(self.species_threshold) is float:
self._get_sp_proportion()
def groups(self):
"""
Generator for group file. This replaces the self.groups attribute of
the original Group Object. Instead of loading the whole file into
memory, a generator is created to iterate over its contents. It may
run a bit slower but its a lot more memory efficient.
:return:
"""
file_handle = open(self.name)
for line in file_handle:
if line.strip() != "":
yield line.strip()
def iter_species_frequency(self):
"""
In order to prevent permanent changes to the species_frequency
attribute due to the filtering of taxa, this iterable should be used
instead of the said variable. This creates a temporary deepcopy of
species_frequency which will be iterated over and eventually modified.
"""
# Since the items of species_frequency are mutable, this ensures
# that even those objects are correctly cloned
sp_freq = copy.deepcopy(self.species_frequency)
for cl in sp_freq:
yield cl
def _remove_tx(self, line):
"""
Given a group line, remove all references to the excluded taxa
:param line: raw group file line
"""
new_line = "{}:".format(line.split(":")[0])
tx_str = "\t".join([x for x in line.split(":")[1].split() if
x.split("|")[0] not in self.excluded_taxa])
return new_line + tx_str
def _apply_filter(self, cl):
"""
Sets or updates the basic group statistics, such as the number of
orthologs compliant with the gene copy and minimum taxa filters.
:param cl: dictionary. Contains the number of occurrences for each
taxon present in the ortholog cluster
(e.g. {"taxonA": 2, "taxonB": 1).
"""
# First, remove excluded taxa from the cl object since this will
# impact all other filters
for tx in self.excluded_taxa:
cl.pop(tx, None)
if cl:
self.all_clusters += 1
extra_copies = max(cl.values())
if extra_copies > self.max_extra_copy:
self.max_extra_copy = extra_copies
if extra_copies <= self.gene_threshold and self.gene_threshold and\
len(cl) >= self.species_threshold and \
self.species_threshold:
self.num_gene_compliant += 1
self.num_species_compliant += 1
self.all_compliant += 1
elif (extra_copies <= self.gene_threshold and
self.gene_threshold) or self.gene_threshold == 0:
self.num_gene_compliant += 1
elif len(cl) >= self.species_threshold and \
self.species_threshold:
self.num_species_compliant += 1
def _get_compliance(self, cl):
"""
Determines whether an ortholog cluster is compliant with the specified
ortholog filters.
:param ccl: dictionary. Contains the number of occurrences for each
taxon present in the ortholog cluster
(e.g. {"taxonA": 2, "taxonB": 1).
:return: tuple. The first element refers to the gene copy filter
while the second refers to the minimum taxa filter. Values of 1
indicate that the ortholg cluster is compliant.
"""
for tx in self.excluded_taxa:
cl.pop(tx, None)
if cl:
cp = max(cl.values())
if not self.gene_threshold and not self.species_threshold:
return 1, 1
if cp <= self.gene_threshold and self.gene_threshold and\
len(cl) >= self.species_threshold and \
self.species_threshold:
return 1, 1
elif (cp <= self.gene_threshold and self.gene_threshold) or \
not self.gene_threshold:
return 1, 0
elif (len(cl) >= self.species_threshold and
self.species_threshold) or not self.species_threshold:
return 0, 1
else:
return 0, 0
def _reset_counter(self):
self.all_clusters = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
self.all_compliant = 0
def _parse_groups(self, ns=None):
for cl in self.groups():
if ns:
if ns.stop:
raise KillByUser("")
# Retrieve the field containing the ortholog sequences
sequence_field = cl.split(":")[1]
# Update species frequency list
sp_freq = Counter((x.split("|")[0] for x in
sequence_field.split()))
self.species_frequency.append(sp_freq)
# Update number of sequences
self.total_seqs += len(sequence_field)
# Update max number of extra copies
extra_copies = max(sp_freq.values())
if extra_copies > self.max_extra_copy:
self.max_extra_copy = max(sp_freq.values())
self.species_list.extend([x for x in sp_freq if x not in
self.species_list])
# Apply filters, if any
# gene filter
if self.species_threshold and self.gene_threshold:
self._apply_filter(sp_freq)
def exclude_taxa(self, taxa_list, update_stats=False):
"""
Updates the excluded_taxa attribute and updates group statistics if
update_stats is True. This does not change the Group object data
permanently, only sets an attribute that will be taken into account
when plotting and exporting data.
:param taxa_list: list. List of taxa that should be excluded from
downstream operations
:param update_stats: boolean. If True, it will update the group
statistics
"""
# IF the taxa_list is the same as the excluded_taxa attribute,
# there is nothing to do
if sorted(taxa_list) == sorted(self.excluded_taxa):
return
self.species_list = [x for x in self.species_list + self.excluded_taxa
if x not in taxa_list]
self.excluded_taxa = taxa_list
if update_stats:
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
def basic_group_statistics(self, update_stats=True):
if update_stats:
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
return len(self.species_frequency), self.total_seqs, \
self.num_gene_compliant, self.num_species_compliant, \
self.all_compliant
def _get_sp_proportion(self):
"""
When the species filter is a float value between 0 and 1, convert
this proportion into absolute values (rounded up), since filters were
already designed for absolutes.
"""
self.species_threshold = int(self.species_threshold *
len(self.species_list))
def update_filters(self, gn_filter, sp_filter, update_stats=False):
"""
Updates the group filter attributes and group summary stats if
update_stats is True. This method does not change the
data of the Group object, only sets attributes that will be taken into
account when plotting or exporting data
:param gn_filter: integer. Maximum number of gene copies allowed in an
ortholog cluster
:param sp_filter: integer/float. Minimum number/proportion of taxa
representation
:param update_stats: boolean. If True it will update the group summary
statistics
"""
# If the provided filters are the same as the current group attributes
# there is nothing to do
if (gn_filter, sp_filter) == (self.gene_threshold,
self.species_threshold):
return
self.gene_threshold = gn_filter
self.species_threshold = sp_filter
if type(self.species_threshold) is float:
self._get_sp_proportion()
if update_stats:
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
def retrieve_sequences(self, sqldb, protein_db, dest="./",
shared_namespace=None, outfile=None):
"""
:param sqldb: srting. Path to sqlite database file
:param protein_db: string. Path to protein database file
:param dest: string. Directory where sequences will be exported
:param shared_namespace: Namespace object to communicate with
TriFusion's main process
:param outfile: If set, all sequeces will be instead saved in a
single output file. This is used for the nucleotide sequence export
:return:
"""
if not os.path.exists(dest) and not outfile:
os.makedirs(dest)
if not os.path.exists(join(dest, "header_correspondance")):
os.makedirs(join(dest, "header_correspondance"))
if shared_namespace:
shared_namespace.act = shared_namespace.msg = "Creating database"
# Stores sequences that could not be retrieved
shared_namespace.missed = shared_namespace.counter = 0
shared_namespace.progress = 0
# Get number of lines of protein database
p = 0
with open(protein_db) as fh:
for p, _ in enumerate(fh):
pass
shared_namespace.max_pb = shared_namespace.total = p + 1
# Connect to database
con = sqlite3.connect(sqldb)
c = con.cursor()
table_name = "".join([x for x in protein_db if x.isalnum()]).encode(
"utf8")
# Create table if it does not exist
if not c.execute("SELECT name FROM sqlite_master WHERE type='table' "
"AND name='{}'".format(table_name)).fetchall():
c.execute("CREATE TABLE [{}] (seq_id text PRIMARY KEY, seq text)".
format(table_name))
# Populate database
with open(protein_db) as ph:
seq = ""
for line in ph:
# Kill switch
if shared_namespace:
if shared_namespace.stop:
con.close()
raise KillByUser("")
shared_namespace.progress += 1
shared_namespace.counter += 1
if line.startswith(">"):
if seq != "":
c.execute("INSERT INTO [{}] VALUES (?, ?)".
format(table_name), (seq_id, seq))
seq_id = line.strip()[1:]
seq = ""
else:
seq += line.strip()
con.commit()
if shared_namespace:
shared_namespace.act = shared_namespace.msg = "Fetching sequences"
shared_namespace.good = shared_namespace.counter = 0
shared_namespace.progress = 0
shared_namespace.max_pb = shared_namespace.total = \
self.all_compliant
# Set single output file, if option is set
if outfile:
output_handle = open(join(dest, outfile), "w")
# Fetching sequences
for line, cl in zip(self.groups(), self.iter_species_frequency()):
# Kill switch
if shared_namespace:
if shared_namespace.stop:
con.close()
raise KillByUser("")
# Filter sequences
if self._get_compliance(cl) == (1, 1):
if shared_namespace:
shared_namespace.good += 1
shared_namespace.progress += 1
shared_namespace.counter += 1
# Retrieve sequences from current cluster
if self.excluded_taxa:
line = self._remove_tx(line)
fields = line.split(":")
# Open file
if not outfile:
cl_name = fields[0]
oname = join(dest, cl_name)
mname = join(dest, "header_correspondance", cl_name)
output_handle = open(oname + ".fas", "w")
map_handle = open(mname + "_headerMap.csv", "w")
seqs = fields[-1].split()
for i in seqs:
# Query database
c.execute("SELECT * FROM [{}] WHERE seq_id = ?".
format(table_name), (i,))
vals = c.fetchone()
# Handles cases where the sequence could not be retrieved
# If outfile is set, output_handle will be a single file
# for all groups. If not, it will represent an individual
# group file
try:
if not outfile:
tx_name = vals[0].split("|")[0]
output_handle.write(">{}\n{}\n".format(tx_name,
vals[1]))
map_handle.write("{}; {}\n".format(vals[0],
tx_name))
else:
output_handle.write(">{}\n{}\n".format(vals[0],
vals[1]))
except TypeError:
pass
if not outfile:
output_handle.close()
if outfile:
output_handle.close()
con.close()
def export_filtered_group(self, output_file_name="filtered_groups",
dest="./", shared_namespace=None):
if shared_namespace:
shared_namespace.act = "Exporting filtered orthologs"
shared_namespace.missed = 0
shared_namespace.good = 0
output_handle = open(os.path.join(dest, output_file_name), "w")
for p, (line, cl) in enumerate(zip(self.groups(),
self.iter_species_frequency())):
if shared_namespace:
if shared_namespace.stop:
raise KillByUser("")
if shared_namespace:
shared_namespace.progress = p
if self._get_compliance(cl) == (1, 1):
if shared_namespace:
shared_namespace.good += 1
if self.excluded_taxa:
l = self._remove_tx(line)
else:
l = line
output_handle.write("{}\n".format(l))
output_handle.close()
def bar_species_distribution(self, filt=False):
if filt:
data = Counter((len(cl) for cl in self.iter_species_frequency() if
self._get_compliance(cl) == (1, 1)))
else:
data = Counter((len(cl) for cl in self.species_frequency))
x_labels = [x for x in list(data)]
data = list(data.values())
# When data is empty, return an exception
if not data:
return {"data": None}
# Sort lists
x_labels = [list(x) for x in zip(*sorted(zip(x_labels, data)))][0]
# Convert label to strings
x_labels = [str(x) for x in x_labels]
title = "Taxa frequency distribution"
ax_names = ["Number of taxa", "Ortholog frequency"]
return {"data": [data],
"title": title,
"ax_names": ax_names,
"labels": x_labels,
"table_header": ["Number of species",
"Ortholog frequency"]}
def bar_genecopy_distribution(self, filt=False):
"""
Creates a bar plot with the distribution of gene copies across
clusters
:param filt: Boolean, whether or not to use the filtered groups.
"""
if filt:
data = Counter((max(cl.values()) for cl in
self.iter_species_frequency() if
self._get_compliance(cl) == (1, 1)))
else:
data = Counter((max(cl.values()) for cl in self.species_frequency
if cl))
x_labels = [x for x in list(data)]
data = list(data.values())
# When data is empty, return an exception
if not data:
return {"data": None}
x_labels, data = (list(x) for x in zip(*sorted(zip(x_labels, data))))
# Convert label to strings
x_labels = [str(x) for x in x_labels]
title = "Gene copy distribution"
ax_names = ["Number of gene copies", "Ortholog frequency"]
return {"data": [data],
"labels": x_labels,
"title": title,
"ax_names": ax_names,
"table_header": ["Number of gene copies",
"Ortholog frequency"]}
def bar_species_coverage(self, filt=False):
"""
Creates a stacked bar plot with the proportion of
:return:
"""
data = Counter(dict((x, 0) for x in self.species_list))
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
if filt:
data += Counter(dict((x, 1) for x, y in cl.items() if y > 0 and
self._get_compliance(cl) == (1, 1)))
else:
data += Counter(dict((x, 1) for x, y in cl.items() if y > 0))
data = data.most_common()
# When data is empty, return an exception
if not data:
return {"data": None}
x_labels = [str(x[0]) for x in data]
data = [[x[1] for x in data], [self.all_clusters - x[1] if not
filt else self.all_compliant - x[1]
for x in data]]
lgd_list = ["Available data", "Missing data"]
ax_names = [None, "Ortholog frequency"]
return {"data": data,
"labels": x_labels,
"lgd_list": lgd_list,
"ax_names": ax_names}
def bar_genecopy_per_species(self, filt=False):
data = Counter(dict((x, 0) for x in self.species_list))
self._reset_counter()
for cl in self.iter_species_frequency():
self._apply_filter(cl)
if filt:
data += Counter(dict((x, y) for x, y in cl.items() if y > 1 and
self._get_compliance(cl) == (1, 1)))
else:
data += Counter(dict((x, y) for x, y in cl.items() if y > 1))
data = data.most_common()
# When data is empty, return an exception
if not data:
return {"data": None}
x_labels = [str(x[0]) for x in data]
data = [[x[1] for x in data]]
ax_names = [None, "Gene copies"]
return {"data": data,
"labels": x_labels,
"ax_names": ax_names}
class Group(object):
""" This represents the main object of the orthomcl toolbox module. It is
initialized with a file name of a orthomcl groups file and provides
several methods that act on that group file. To process multiple Group
objects, see MultiGroups object """
def __init__(self, groups_file, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups"):
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
# Attribute containing the list of included species
self.species_list = []
# Attribute that will contain taxa to be excluded from analyses
self.excluded_taxa = []
# Attributes that will store the number (int) of cluster after gene and
# species filter
self.all_compliant = 0
self.num_gene_compliant = 0
self.num_species_compliant = 0
# Attribute containing the total number of sequences
self.total_seqs = 0
# Attribute containing the maximum number of extra copies found in the
# clusters
self.max_extra_copy = 0
# Attribute with name of the group file, which will be an ID
self.group_name = groups_file
# Initialize the project prefix for possible output files
self.prefix = project_prefix
# Initialize attribute containing the original groups
self.groups = []
# Initialize atribute containing the groups filtered using the gene and
# species threshold. This attribute can be updated at any time using
# the update_filtered_group method
self.filtered_groups = []
self.name = None
# Parse groups file and populate groups attribute
self.__parse_groups(groups_file)
def __parse_groups(self, groups_file):
"""
Parses the ortholog clusters in the groups file and populates the
self.groups list with Cluster objects for each line in the groups file.
:param groups_file: File name for the orthomcl groups file
:return: populates the groups attribute
"""
self.name = groups_file
self.species_list = []
groups_file_handle = open(groups_file)
for line in groups_file_handle:
cluster_object = Cluster(line)
# Add cluster to general group list
self.groups.append(cluster_object)
# Update total sequence counter
self.total_seqs += len(cluster_object.sequences)
# Update maximum number of extra copies, if needed
if max(cluster_object.species_frequency.values()) > \
self.max_extra_copy:
self.max_extra_copy = \
max(cluster_object.species_frequency.values())
# Update species_list attribute
self.species_list = list(set(self.species_list).union(
set(cluster_object.species_frequency.keys())))
# If thresholds have been specified, update self.filtered_groups
# attribute
if self.species_threshold and self.gene_threshold:
cluster_object.apply_filter(self.gene_threshold,
self.species_threshold)
if cluster_object.species_compliant and \
cluster_object.gene_compliant:
# Add cluster to the filtered group list
self.filtered_groups.append(cluster_object)
self.all_compliant += 1
# Update num_species_compliant attribute
if cluster_object.species_compliant:
self.num_species_compliant += 1
# Update num_gene_compliant attribute
if cluster_object.gene_compliant:
self.num_gene_compliant += 1
def exclude_taxa(self, taxa_list):
"""
Adds a taxon_name to the excluded_taxa list and updates the
filtered_groups list
"""
self.excluded_taxa.extend(taxa_list)
# Storage variable for new filtered groups
filtered_groups = []
# Reset max_extra_copy attribute
self.max_extra_copy = 0
for cl in self.groups:
cl.remove_taxa(taxa_list)
if cl.iter_sequences and cl.species_frequency:
filtered_groups.append(cl)
# Update maximum number of extra copies, if needed
if max(cl.species_frequency.values()) > self.max_extra_copy:
self.max_extra_copy = max(cl.species_frequency.values())
# Update species_list
self.species_list = sorted(list(set(self.species_list) -
set(taxa_list)))
self.filtered_groups = self.groups = filtered_groups
def get_filters(self):
"""
Returns a tuple with the thresholds for max gene copies and min species
"""
return self.gene_threshold, self.species_threshold
def basic_group_statistics(self):
"""
This method creates a basic table in list format containing basic
information of the groups file (total number of clusters, total number
of sequences, number of clusters below the gene threshold, number of
clusters below the species threshold and number of clusters below the
gene AND species threshold)
:return: List containing number of
[total clusters,
total sequences,
clusters above gene threshold,
clusters above species threshold,
clusters above gene and species threshold]
"""
# Total number of clusters
total_cluster_num = len(self.groups)
# Total number of sequenes
total_sequence_num = self.total_seqs
# Gene compliant clusters
clusters_gene_threshold = self.num_gene_compliant
# Species compliant clusters
clusters_species_threshold = self.num_species_compliant
clusters_all_threshold = len(self.filtered_groups)
statistics = [total_cluster_num, total_sequence_num,
clusters_gene_threshold, clusters_species_threshold,
clusters_all_threshold]
return statistics
def paralog_per_species_statistic(self, output_file_name=
"Paralog_per_species.csv", filt=True):
"""
This method creates a CSV table with information on the number of
paralog clusters per species
:param output_file_name: string. Name of the output csv file
:param filt: Boolean. Whether to use the filtered groups (True) or
total groups (False)
"""
# Setting which clusters to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
paralog_count = dict((species, 0) for species in self.species_list)
for cluster in groups:
for species in paralog_count:
if cluster.species_frequency[species] > 1:
paralog_count[species] += 1
# Writing table
output_handle = open(output_file_name, "w")
output_handle.write("Species; Clusters with paralogs\n")
for species, val in paralog_count.items():
output_handle.write("%s; %s\n" % (species, val))
output_handle.close()
def export_filtered_group(self, output_file_name="filtered_groups",
dest="./", get_stats=False,
shared_namespace=None):
"""
Export the filtered groups into a new file.
:param output_file_name: string, name of the filtered groups file
:param dest: string, path to directory where the filtered groups file
will be created
:param get_stats: Boolean, whether to return the basic count stats or
not
:param shared_namespace: Namespace object, for communicating with
main process.
"""
if self.filtered_groups:
if shared_namespace:
shared_namespace.act = "Exporting filtered orthologs"
output_handle = open(os.path.join(dest, output_file_name), "w")
if get_stats:
all_orthologs = len(self.groups)
sp_compliant = 0
gene_compliant = 0
final_orthologs = 0
for cluster in self.filtered_groups:
if shared_namespace:
shared_namespace.progress = \
self.filtered_groups.index(cluster)
if cluster.species_compliant and cluster.gene_compliant:
output_handle.write("%s: %s\n" % (
cluster.name, " ".join(cluster.iter_sequences)))
if get_stats:
final_orthologs += 1
if get_stats:
if cluster.species_compliant:
sp_compliant += 1
if cluster.gene_compliant:
gene_compliant += 1
output_handle.close()
if get_stats:
return all_orthologs, sp_compliant, gene_compliant,\
final_orthologs
else:
raise OrthoGroupException("The groups object must be filtered "
"before using the export_filtered_group"
"method")
def update_filters(self, gn_filter, sp_filter):
"""
Sets new values for the self.species_threshold and self.gene_threshold
and updates the filtered_group
:param gn_filter: int. Maximum value for gene copies in cluster
:param sp_filter: int. Minimum value for species in cluster
"""
self.species_threshold = int(sp_filter)
self.gene_threshold = int(gn_filter)
self.update_filtered_group()
def update_filtered_group(self):
"""
This method creates a new filtered group variable, like
export_filtered_group, but instead of writing into a new file, it
replaces the self.filtered_groups variable
"""
self.filtered_groups = []
# Reset gene and species compliant counters
self.num_gene_compliant = 0
self.num_species_compliant = 0
for cluster in self.groups:
cluster.apply_filter(self.gene_threshold, self.species_threshold)
if cluster.species_compliant and cluster.gene_compliant:
self.filtered_groups.append(cluster)
# Update num_species_compliant attribute
if cluster.species_compliant:
self.num_species_compliant += 1
# Update num_gene_compliant attribute
if cluster.gene_compliant:
self.num_gene_compliant += 1
def retrieve_sequences(self, database, dest="./", mode="fasta",
filt=True, shared_namespace=None):
"""
When provided with a database in Fasta format, this will use the
Alignment object to retrieve sequences
:param database: String. Fasta file
:param dest: directory where files will be save
:param mode: string, whether to retrieve sequences to a file ('fasta'),
or a dictionary ('dict')
:param filt: Boolean. Whether to use the filtered groups (True) or
total groups (False)
:param shared_namespace: Namespace object. This argument is meant for
when fast are retrieved in a background process, where there is a need
to update the main process of the changes in this method
:param dest: string. Path to directory where the retrieved sequences
will be created.
"""
if mode == "dict":
seq_storage = {}
if filt:
groups = self.filtered_groups
else:
groups = self.groups
if not os.path.exists("Orthologs"):
os.makedirs("Orthologs")
# Update method progress
if shared_namespace:
shared_namespace.act = "Creating database"
shared_namespace.progress = 0
print("Creating db")
# Check what type of database was provided
#TODO: Add exception handling if file is not parsed with Aligment
if isinstance(database, str):
try:
db_aln = pickle.load(open(database, "rb"))
except (EnvironmentError, pickle.UnpicklingError):
db_aln = Alignment(database)
db_aln = db_aln.alignment
elif isinstance(database, dict):
db_aln = database
else:
raise OrthoGroupException("The input database is neither a string"
"nor a dictionary object")
print("Retrieving seqs")
# Update method progress
if shared_namespace:
shared_namespace.act = "Retrieving sequences"
for cluster in groups:
if shared_namespace:
shared_namespace.progress += 1
if mode == "dict":
seq_storage[cluster.name] = []
output_handle = open(join(dest, cluster.name + ".fas"), "w")
for sequence_id in cluster.iter_sequences:
seq = db_aln[sequence_id]
if mode == "fasta":
output_handle.write(">%s\n%s\n" % (sequence_id, seq))
elif mode == "dict":
seq_storage[cluster.name].append([sequence_id.split("|")[0],
seq])
output_handle.close()
if mode == "dict":
return seq_storage
def bar_species_distribution(self, dest="./", filt=False, ns=None,
output_file_name="Species_distribution"):
"""
Creates a bar plot with the distribution of species numbers across
clusters
:param dest: string, destination directory
:param filt: Boolean, whether or not to use the filtered groups.
:param output_file_name: string, name of the output file
"""
data = []
# Determine which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
for i in groups:
if ns:
if ns.stop:
raise KillByUser("")
data.append(len([x for x, y in i.species_frequency.items()
if y > 0]))
# Transform data into histogram-like
transform_data = Counter(data)
x_labels = [x for x in list(transform_data)]
y_vals = list(transform_data.values())
# Sort lists
x_labels, y_vals = (list(x) for x in zip(*sorted(zip(x_labels,
y_vals))))
# Convert label to strings
x_labels = [str(x) for x in x_labels]
if ns:
if ns.stop:
raise KillByUser("")
# Create plot
b_plt, lgd, _ = bar_plot([y_vals], x_labels,
title="Taxa frequency distribution",
ax_names=["Number of taxa", "Ortholog frequency"])
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
dpi=400)
# Create table
table_list = [["Number of species", "Ortholog frequency"]]
for x, y in zip(x_labels, y_vals):
table_list.append([x, y])
return b_plt, lgd, table_list
def bar_genecopy_distribution(self, dest="./", filt=False,
output_file_name="Gene_copy_distribution.png"):
"""
Creates a bar plot with the distribution of gene copies across
clusters
:param dest: string, destination directory
:param filt: Boolean, whether or not to use the filtered groups.
:param output_file_name: string, name of the output file
"""
data = []
# Determin which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
for cl in groups:
# Get max number of copies
max_copies = max(cl.species_frequency.values())
data.append(max_copies)
# Transform data into histogram-like
transform_data = Counter(data)
x_labels = [x for x in list(transform_data)]
y_vals = list(transform_data.values())
# Sort lists
x_labels, y_vals = (list(x) for x in zip(*sorted(zip(x_labels,
y_vals))))
# Convert label to strings
x_labels = [str(x) for x in x_labels]
# Create plot
b_plt, lgd, _ = bar_plot([y_vals], x_labels,
title="Gene copy distribution",
ax_names=["Number of gene copies", "Ortholog frequency"],
reverse_x=False)
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
figsize=(8 * len(x_labels) / 4, 6), dpi=200)
# Create table
table_list = [["Number of gene copies", "Ortholog frequency"]]
for x, y in zip(x_labels, y_vals):
table_list.append([x, y])
return b_plt, lgd, table_list
def bar_species_coverage(self, dest="./", filt=False, ns=None,
output_file_name="Species_coverage"):
"""
Creates a stacked bar plot with the proportion of
:return:
"""
# Determine which groups to use
if filt:
groups = self.filtered_groups
else:
groups = self.groups
data = Counter(dict((x, 0) for x in self.species_list))
for cl in groups:
if ns:
if ns.stop:
raise KillByUser("")
data += Counter(dict((x, 1) for x, y in cl.species_frequency.items()
if y > 0))
xlabels = [str(x) for x in list(data.keys())]
data = [list(data.values()), [len(groups) - x for x in
data.values()]]
lgd_list = ["Available data", "Missing data"]
if ns:
if ns.stop:
raise KillByUser("")
b_plt, lgd, _ = bar_plot(data, xlabels, lgd_list=lgd_list,
ax_names=[None, "Ortholog frequency"])
b_plt.savefig(os.path.join(dest, output_file_name), bbox_inches="tight",
dpi=200)
return b_plt, lgd, ""
class MultiGroups(object):
""" Creates an object composed of multiple Group objects """
def __init__(self, groups_files=None, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups"):
"""
:param groups_files: A list containing the file names of the multiple
group files
:return: Populates the self.multiple_groups attribute
"""
# If a MultiGroups is initialized with duplicate Group objects, these
# will be stored in a list. If all Group objects are unique, the list
# will remain empty
self.duplicate_groups = []
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
self.prefix = project_prefix
self.multiple_groups = {}
self.filters = {}
if groups_files:
for group_file in groups_files:
# If group_file is already a Group object, just add it
if not isinstance(group_file, Group):
# Check for duplicate group files
group_object = Group(group_file, self.gene_threshold,
self.species_threshold)
else:
group_object = group_file
if group_object.name in self.multiple_groups:
self.duplicate_groups.append(group_object.name)
else:
self.multiple_groups[group_object.name] = group_object
self.filters[group_object.name] = (1,
len(group_object.species_list))
def __iter__(self):
return iter(self.multiple_groups)
def iter_gnames(self):
return (x.name for x in self.multiple_groups)
def get_gnames(self):
return [x.name for x in self.multiple_groups]
def add_group(self, group_obj):
"""
Adds a group object
:param group_obj: Group object
"""
# Check for duplicate groups
if group_obj.name in self.multiple_groups:
self.duplicate_groups.append(group_obj.name)
else:
self.multiple_groups[group_obj.name] = group_obj
def remove_group(self, group_id):
"""
Removes a group object according to its name
:param group_id: string, name matching a Group object name attribute
"""
if group_id in self.multiple_groups:
del self.multiple_groups[group_id]
def get_group(self, group_id):
"""
Returns a group object based on its name. If the name does not match
any group object, returns None
:param group_id: string. Name of group object
"""
try:
return self.multiple_groups[group_id]
except KeyError:
return
def add_multigroups(self, multigroup_obj):
"""
Merges a MultiGroup object
:param multigroup_obj: MultiGroup object
"""
for group_obj in multigroup_obj:
self.add_group(group_obj)
def update_filters(self, gn_filter, sp_filter, group_names=None,
default=False):
"""
This will not change the Group object themselves, only the filter
mapping. The filter is only applied when the Group object is retrieved
to reduce computations
:param gn_filter: int, filter for max gene copies
:param sp_filter: int, filter for min species
:param group_names: list, with names of group objects
"""
if group_names:
for group_name in group_names:
# Get group object
group_obj = self.multiple_groups[group_name]
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update filter map
self.filters[group_name] = (gn_filter, sp_filter)
for group_name, group_obj in self.multiple_groups.items():
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update filter map
self.filters[group_name] = (gn_filter, sp_filter)
def basic_multigroup_statistics(self, output_file_name=
"multigroup_base_statistics.csv"):
"""
:param output_file_name:
:return:
"""
# Creates the storage for the statistics of the several files
statistics_storage = OrderedDict()
for group in self.multiple_groups:
group_statistics = group.basic_group_statistics()
statistics_storage[group.name] = group_statistics
output_handle = open(self.prefix + "." + output_file_name, "w")
output_handle.write("Group file; Total clusters; Total sequences; "
"Clusters below gene threshold; Clusters above "
"species threshold; Clusters below gene and above"
" species thresholds\n")
for group, vals in statistics_storage.items():
output_handle.write("%s; %s\n" % (group, ";".join([str(x) for x
in vals])))
output_handle.close()
def bar_orthologs(self, output_file_name="Final_orthologs",
dest="./", stats="total"):
"""
Creates a bar plot with the final ortholog values for each group file
:param output_file_name: string. Name of output file
:param dest: string. output directory
:param stats: string. The statistics that should be used to generate
the bar plot. Options are:
..: "1": Total orthologs
..: "2": Species compliant orthologs
..: "3": Gene compliant orthologs
..: "4": Final orthologs
..: "all": All of the above
Multiple combinations can be provided, for instance: "123" will
display bars for total, species compliant and gene compliant stats
"""
# Stores the x-axis labels
x_labels = []
# Stores final ortholog values for all 4 possible data sets
vals = [[], [], [], []]
lgd = ["Total orthologs", "After species filter", "After gene filter",
"Final orthologs"]
# Get final ortholog values
for g_obj in self.multiple_groups:
x_labels.append(g_obj.name.split(os.sep)[-1])
# Populate total orthologs
if "1" in stats or stats == "all":
vals[0].append(len(g_obj.groups))
# Populate species compliant orthologs
if "2" in stats or stats == "all":
vals[1].append(g_obj.num_species_compliant)
# Populate gene compliant orthologs
if "3" in stats or stats == "all":
vals[2].append(g_obj.num_gene_compliant)
# Populate final orthologs
if "4" in stats or stats == "all":
vals[3].append(len(g_obj.filtered_groups))
# Filter valid data sets
lgd_list = [x for x in lgd if vals[lgd.index(x)]]
vals = [l for l in vals if l]
# Create plot
b_plt, lgd = multi_bar_plot(vals, x_labels, lgd_list=lgd_list)
b_plt.savefig(os.path.join(dest, output_file_name),
bbox_extra_artists=(lgd,), bbox_inches="tight")
# Create table list object
table_list = []
# Create header
table_list.append([""] + x_labels)
# Create content
for i in range(len(vals)):
table_list += [x for x in [[lgd_list[i]] + vals[i]]]
return b_plt, lgd, table_list
def group_overlap(self):
"""
This will find the overlap of orthologs between two group files.
THIS METHOD IS TEMPORARY AND EXPERIMENTAL
"""
def parse_groups(group_obj):
"""
Returns a list with the sorted ortholog clusters
"""
storage = []
for cluster in group_obj.groups:
storage.append(set(cluster.iter_sequences))
return storage
if len(self.multiple_groups) != 2:
raise SystemExit("This method can only be used with two group "
"files")
group1 = self.multiple_groups[0]
group2 = self.multiple_groups[1]
group1_list = parse_groups(group1)
group2_list = parse_groups(group2)
counter = 0
for i in group1_list:
if i in group2_list:
counter += 1
class MultiGroupsLight(object):
"""
Creates an object composed of multiple Group objects like MultiGroups.
However, instead of storing the groups in memory, these are shelved in
the disk
"""
# The report calls available
calls = ['bar_genecopy_distribution',
'bar_species_distribution',
'bar_species_coverage',
'bar_genecopy_per_species']
def __init__(self, db_path, groups=None, gene_threshold=None,
species_threshold=None, project_prefix="MyGroups",
ns=None):
"""
:param groups: A list containing the file names of the multiple
group files
:return: Populates the self.multiple_groups attribute
"""
self.db_path = db_path
# If a MultiGroups is initialized with duplicate Group objects, their
# names will be stored in a list. If all Group objects are unique, the
# list will remain empty
self.duplicate_groups = []
self.groups = {}
self.groups_stats = {}
# Attribute that will store the paths of badly formated group files
self.bad_groups = []
# Initializing thresholds. These may be set from the start, or using
# some method that uses them as arguments
self.gene_threshold = gene_threshold
self.species_threshold = species_threshold
# Initializing mapping of group filters to their names. Should be
# something like {"groupA": (1, 10)}
self.filters = {}
self.taxa_list = {}
self.excluded_taxa = {}
# This attribute will contain a dictionary with the maximum extra copies
# for each group object
self.max_extra_copy = {}
# This attribute will contain a list with the number of species for
# each group object, excluding replicates. If a MultiGroupLight object
# contains Group objects with different taxa numbers, this attribute
# can be used to issue a warning
self.species_number = []
self.prefix = project_prefix
if ns:
ns.files = len(groups)
if groups:
for group_file in groups:
# If group_file is already a Group object, just add it
if not isinstance(group_file, GroupLight):
try:
if ns:
if ns.stop:
raise KillByUser("")
ns.counter += 1
group_object = GroupLight(group_file,
self.gene_threshold,
self.species_threshold,
ns=ns)
except Exception as e:
print(e.message)
self.bad_groups.append(group_file)
continue
else:
group_object = group_file
# Check for duplicate group files
if group_object.name in self.groups:
self.duplicate_groups.append(group_file.name)
else:
self.add_group(group_object)
def __iter__(self):
for k, val in self.groups.items():
yield k, pickle.load(open(val, "rb"))
def clear_groups(self):
"""
Clears the current MultiGroupsLight object
"""
for f in self.groups.values():
os.remove(f)
self.duplicate_groups = []
self.groups = {}
self.groups_stats = {}
self.filters = {}
self.max_extra_copy = {}
self.species_number = []
self.gene_threshold = self.species_threshold = 0
def add_group(self, group_obj):
"""
Adds a group object
:param group_obj: Group object
"""
# Check for duplicate groups
if group_obj.name not in self.groups:
gpath = os.path.join(self.db_path,
"".join(random.choice(string.ascii_uppercase) for _ in
range(15)))
pickle.dump(group_obj, open(gpath, "wb"))
self.groups[group_obj.name] = gpath
self.filters[group_obj.name] = (1, len(group_obj.species_list), [])
self.max_extra_copy[group_obj.name] = group_obj.max_extra_copy
if len(group_obj.species_list) not in self.species_number:
self.species_number.append(len(group_obj.species_list))
else:
self.duplicate_groups.append(group_obj.name)
def remove_group(self, group_id):
"""
Removes a group object according to its name
:param group_id: string, name matching a Group object name attribute
"""
if group_id in self.groups:
os.remove(self.groups[group_id])
del self.groups[group_id]
def get_group(self, group_id):
"""
Returns a group object based on its name. If the name does not match
any group object, returns None
:param group_id: string. Name of group object
"""
try:
return pickle.load(open(self.groups[unicode(group_id)], "rb"))
except KeyError:
return
def add_multigroups(self, multigroup_obj):
"""
Merges a MultiGroup object
:param multigroup_obj: MultiGroup object
"""
for _, group_obj in multigroup_obj:
self.add_group(group_obj)
def update_filters(self, gn_filter, sp_filter, excluded_taxa,
group_names=None, default=False):
"""
This will not change the Group object themselves, only the filter
mapping. The filter is only applied when the Group object is retrieved
to reduce computations
:param gn_filter: int, filter for max gene copies
:param sp_filter: int, filter for min species
:param group_names: list, with names of group objects
"""
# There are no groups to update
if group_names == []:
return
if group_names:
glist = group_names
else:
glist = self.groups
for group_name in glist:
# Get group object
group_obj = pickle.load(open(self.groups[group_name], "rb"))
# Define excluded taxa
group_obj.exclude_taxa(excluded_taxa, True)
# Define filters
gn_filter = gn_filter if not default else 1
sp_filter = sp_filter if not default else \
len(group_obj.species_list)
# Correct maximum filter values after excluding taxa
gn_filter = gn_filter if gn_filter <= group_obj.max_extra_copy \
else group_obj.max_extra_copy
sp_filter = sp_filter if sp_filter <= len(group_obj.species_list) \
else len(group_obj.species_list)
# Update Group object with new filters
group_obj.update_filters(gn_filter, sp_filter)
# Update group stats
self.get_multigroup_statistics(group_obj)
pickle.dump(group_obj, open(self.groups[group_name], "wb"))
# Update filter map
self.filters[group_name] = (gn_filter, group_obj.species_threshold)
self.taxa_list[group_name] = group_obj.species_list
self.excluded_taxa[group_name] = group_obj.excluded_taxa
def get_multigroup_statistics(self, group_obj):
"""
:return:
"""
stats = group_obj.basic_group_statistics()
self.groups_stats[group_obj.name] = {"stats": stats,
"species": group_obj.species_list,
"max_copies": group_obj.max_extra_copy}
def bar_orthologs(self, group_names=None, output_file_name="Final_orthologs",
dest="./", stats="all"):
"""
Creates a bar plot with the final ortholog values for each group file
:param group_names: list. If None, all groups in self.group_stats will
be used to generate the plot. Else, only the groups with the names in
the list will be plotted.
:param output_file_name: string. Name of output file
:param dest: string. output directory
:param stats: string. The statistics that should be used to generate
the bar plot. Options are:
..: "1": Total orthologs
..: "2": Species compliant orthologs
..: "3": Gene compliant orthologs
..: "4": Final orthologs
..: "all": All of the above
Multiple combinations can be provided, for instance: "123" will
display bars for total, species compliant and gene compliant stats
"""
# Stores the x-axis labels
x_labels = []
# Stores final ortholog values for all 4 possible data sets
vals = [[], [], [], []]
lgd = ["Total orthologs", "After species filter", "After gene filter",
"Final orthologs"]
# Determine which groups will be plotted
if group_names:
groups_lst = group_names
else:
groups_lst = self.groups_stats.keys()
for gname in groups_lst:
gstats = self.groups_stats[gname]
x_labels.append(gname.split(os.sep)[-1])
# Populate total orthologs
if "1" in stats or stats == "all":
vals[0].append(gstats["stats"][0])
# Populate species compliant orthologs
if "2" in stats or stats == "all":
vals[1].append(gstats["stats"][3])
# Populate gene compliant orthologs
if "3" in stats or stats == "all":
vals[2].append(gstats["stats"][2])
# Populate final orthologs
if "4" in stats or stats == "all":
vals[3].append(gstats["stats"][4])
# Filter valid data sets
lgd_list = [x for x in lgd if vals[lgd.index(x)]]
vals = [l for l in vals if l]
# Create plot
b_plt, lgd = multi_bar_plot(vals, x_labels, lgd_list=lgd_list)
b_plt.savefig(os.path.join(dest, output_file_name),
bbox_extra_artists=(lgd,), bbox_inches="tight", dpi=200)
# Create table list object
table_list = []
# Create header
table_list.append([""] + x_labels)
# Create content
for i in range(len(vals)):
table_list += [x for x in [[lgd_list[i]] + vals[i]]]
return b_plt, lgd, table_list
__author__ = "Diogo N. Silva"
| ODiogoSilva/TriFusion | trifusion/ortho/OrthomclToolbox.py | Python | gpl-3.0 | 64,833 |
from logger import *
# Easy Demo
"""
log_functions = [('no_negative_ret', 'no_negatives_log')]
log_function_args = []
def query():
def sqrt_filter(x):
return x[0] < 0
get_log('no_negatives_log').filter(sqrt_filter).print_log()
"""
# Intermediate Demo
"""
log_functions = [('add', 'add_log')]
log_function_args = [('mult', 'mult_log')]
def query():
print 'add log:'
get_log('add_log').print_log()
print 'mult log:'
get_log('mult_log').print_log()
"""
# Advanced Demo
"""
log_functions = []
log_function_args = [('process', 'url_log')]
def query():
import re
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def handle_url(urls):
for url in urls[0]:
if regex.match(url) is not None:
log('valid_url', url)
else:
log('invalid_url', url)
get_log('url_log').map(handle_url)
print 'Valid URLs:'
get_log('valid_url').print_log()
print 'Invalid URLs:'
get_log('invalid_url').print_log()
"""
| BluBambu/LumberjackLogger | src/spec.py | Python | gpl-3.0 | 1,357 |
from marshmallow import EXCLUDE, Schema
from ..fields.objectid import ID
class BaseSchema(Schema):
id = ID(description='ID', dump_only=True)
class Meta:
strict = True
ordered = True
unknown = EXCLUDE
| mekanix/flask-bootstrap-sql-rest | freenit/schemas/base.py | Python | gpl-3.0 | 236 |
#!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Parts Copyright 2016 C. Strassburg (lib.utils) [email protected]
# Copyright 2017- Serge Wagener [email protected]
#########################################################################
# This file is part of SmartHomeNG
#
# SmartHomeNG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHomeNG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHomeNG If not, see <http://www.gnu.org/licenses/>.
#########################################################################
"""
| *** ATTENTION: This is early work in progress. Interfaces are subject to change. ***
| *** DO NOT USE IN PRODUCTION until you know what you are doing ***
|
This library contains the future network classes for SmartHomeNG.
New network functions and utilities are going to be implemented in this library.
This classes, functions and methods are mainly meant to be used by plugin developers
"""
import logging
import re
import ipaddress
import requests
import select
import socket
import threading
import time
import queue
class Network(object):
""" This Class has some usefull static methods that you can use in your projects """
@staticmethod
def is_mac(mac):
"""
Validates a MAC address
:param mac: MAC address
:type string: str
:return: True if value is a MAC
:rtype: bool
"""
mac = str(mac)
if len(mac) == 12:
for c in mac:
try:
if int(c, 16) > 15:
return False
except:
return False
return True
octets = re.split('[\:\-\ ]', mac)
if len(octets) != 6:
return False
for i in octets:
try:
if int(i, 16) > 255:
return False
except:
return False
return True
@staticmethod
def is_ip(string):
"""
Checks if a string is a valid ip-address (v4 or v6)
:param string: String to check
:type string: str
:return: True if an ip, false otherwise.
:rtype: bool
"""
return (Network.is_ipv4(string) or Network.is_ipv6(string))
@staticmethod
def is_ipv4(string):
"""
Checks if a string is a valid ip-address (v4)
:param string: String to check
:type string: str
:return: True if an ip, false otherwise.
:rtype: bool
"""
try:
ipaddress.IPv4Address(string)
return True
except ipaddress.AddressValueError:
return False
@staticmethod
def is_ipv6(string):
"""
Checks if a string is a valid ip-address (v6)
:param string: String to check
:type string: str
:return: True if an ipv6, false otherwise.
:rtype: bool
"""
try:
ipaddress.IPv6Address(string)
return True
except ipaddress.AddressValueError:
return False
@staticmethod
def is_hostname(string):
"""
Checks if a string is a valid hostname
The hostname has is checked to have a valid format
:param string: String to check
:type string: str
:return: True if a hostname, false otherwise.
:rtype: bool
"""
try:
return bool(re.match("^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$", string))
except TypeError:
return False
@staticmethod
def get_local_ipv4_address():
"""
Get's local ipv4 address of the interface with the default gateway.
Return '127.0.0.1' if no suitable interface is found
:return: IPv4 address as a string
:rtype: string
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
@staticmethod
def get_local_ipv6_address():
"""
Get's local ipv6 address of the interface with the default gateway.
Return '::1' if no suitable interface is found
:return: IPv6 address as a string
:rtype: string
"""
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
try:
s.connect(('2001:4860:4860::8888', 1))
IP = s.getsockname()[0]
except:
IP = '::1'
finally:
s.close()
return IP
@staticmethod
def ip_port_to_socket(ip, port):
"""
Returns an ip address plus port to a socket string.
Format is 'ip:port' for IPv4 or '[ip]:port' for IPv6
:return: Socket address / IPEndPoint as string
:rtype: string
"""
if Network.is_ipv6(ip):
ip = '[{}]'.format(ip)
return '{}:{}'.format(ip, port)
@staticmethod
def ipver_to_string(ipver):
"""
Converts a socket address family to an ip version string 'IPv4' or 'IPv6'
:param ipver: Socket family
:type ipver: socket.AF_INET or socket.AF_INET6
:return: 'IPv4' or 'IPv6'
:rtype: string
"""
return 'IPv6' if ipver == socket.AF_INET6 else 'IPv4'
class Http(object):
"""
Creates an instance of the Http class.
:param baseurl: base URL used everywhere in this instance (example: http://www.myserver.tld)
:type baseurl: str
"""
def __init__(self, baseurl=None):
self.logger = logging.getLogger(__name__)
self.baseurl = baseurl
self._response = None
self.timeout = 10
def get_json(self, url=None, params=None):
"""
Launches a GET request and returns JSON answer as a dict or None on error.
:param url: Optional URL to fetch from. If None (default) use baseurl given on init.
:param params: Optional dict of parameters to add to URL query string.
:type url: str
:type params: dict
:return: JSON answer decoded into a dict or None on whatever error occured
:rtype: dict | None
"""
self.__get(url=url, params=params)
json = None
try:
json = self._response.json()
except:
self.logger.warning("Invalid JSON received from {} !".format(url if url else self.baseurl))
return json
def get_text(self, url=None, params=None, encoding=None, timeout=None):
"""
Launches a GET request and returns answer as string or None on error.
:param url: Optional URL to fetch from. Default is to use baseurl given to constructor.
:param params: Optional dict of parameters to add to URL query string.
:param encoding: Optional encoding of the received text. Default is to let the lib try to figure out the right encoding.
:type url: str
:type params: dict
:type encoding: str
:return: Answer decoded into a string or None on whatever error occured
:rtype: str | None
"""
_text = None
if self.__get(url=url, params=params, timeout=timeout):
try:
if encoding:
self._response.encoding = encoding
_text = self._response.text
except:
self.logger.error("Successfull GET, but decoding response failed. This should never happen !")
return _text
def get_binary(self, url=None, params=None):
"""
Launches a GET request and returns answer as raw binary data or None on error.
This is usefull for downloading binary objects / files.
:param url: Optional URL to fetch from. Default is to use baseurl given to constructor.
:param params: Optional dict of parameters to add to URL query string.
:type url: str
:type params: dict
:return: Answer as raw binary objector None on whatever error occured
:rtype: bytes | None
"""
self.__get(url=url, params=params)
return self._response.content
def response_status(self):
"""
Returns the status code (200, 404, ...) of the last executed request.
If GET request was not possible and thus no HTTP statuscode is available the returned status code = 0.
:return: Status code and text of last request
:rtype: (int, str)
"""
try:
(code, reason) = (self._response.status_code, self._response.reason)
except:
code = 0
reason = 'Unable to complete GET request'
return (code, reason)
def response_headers(self):
"""
Returns a dictionary with the server return headers of the last executed request
:return: Headers returned by server
:rtype: dict
"""
return self._response.headers
def response_cookies(self):
"""
Returns a dictionary with the cookies the server may have sent on the last executed request
:return: Cookies returned by server
:rtype: dict
"""
return self._response.cookies
def response_object(self):
"""
Returns the raw response object for advanced ussage. Use if you know what you are doing.
Maybe this lib can be extented to your needs instead ?
:return: Reponse object as returned by underlying requests library
:rtype: `requests.Response <http://docs.python-requests.org/en/master/user/quickstart/#response-content>`_
"""
return self._response
def __get(self, url=None, params=None, timeout=None):
url = url if url else self.baseurl
timeout = timeout if timeout else self.timeout
self.logger.info("Sending GET request to {}".format(url))
try:
self._response = requests.get(url, params=params, timeout=timeout)
self.logger.debug("{} Fetched URL {}".format(self.response_status(), self._response.url))
except Exception as e:
self.logger.warning("Error sending GET request to {}: {}".format(url, e))
return False
return True
class Tcp_client(object):
""" Creates a new instance of the Tcp_client class
:param host: Remote host name or ip address (v4 or v6)
:param port: Remote host port to connect to
:param name: Name of this connection (mainly for logging purposes). Try to keep the name short.
:param autoreconnect: Should the socket try to reconnect on lost connection (or finished connect cycle)
:param connect_retries: Number of connect retries per cycle
:param connect_cycle: Time between retries inside a connect cycle
:param retry_cycle: Time between cycles if :param:autoreconnect is True
:param binary: Switch between binary and text mode. Text will be encoded / decoded using encoding parameter.
:param terminator: Terminator to use to split received data into chunks (split lines <cr> for example). If integer then split into n bytes. Default is None means process chunks as received.
:type host: str
:type port: int
:type name: str
:type autoreconnect: bool
:type connect_retries: int
:type connect_cycle: int
:type retry_cycle: int
:type binary: bool
:type terminator: int | bytes | str
"""
def __init__(self, host, port, name=None, autoreconnect=True, connect_retries=5, connect_cycle=5, retry_cycle=30, binary=False, terminator=False):
self.logger = logging.getLogger(__name__)
# Public properties
self.name = name
self.terminator = None
# "Private" properties
self._host = host
self._port = port
self._autoreconnect = autoreconnect
self._is_connected = False
self._connect_retries = connect_retries
self._connect_cycle = connect_cycle
self._retry_cycle = retry_cycle
self._timeout = 1
self._hostip = None
self._ipver = socket.AF_INET
self._socket = None
self._connect_counter = 0
self._binary = binary
self._connected_callback = None
self._disconnected_callback = None
self._data_received_callback = None
# "Secret" properties
self.__connect_thread = None
self.__connect_threadlock = threading.Lock()
self.__receive_thread = None
self.__receive_threadlock = threading.Lock()
self.__running = True
self.logger.setLevel(logging.DEBUG)
self.logger.info("Initializing a connection to {} on TCP port {} {} autoreconnect".format(self._host, self._port, ('with' if self._autoreconnect else 'without')))
# Test if host is an ip address or a host name
if Network.is_ip(self._host):
# host is a valid ip address (v4 or v6)
self.logger.debug("{} is a valid IP address".format(host))
self._hostip = self._host
if Network.is_ipv6(self._host):
self._ipver = socket.AF_INET6
else:
self._ipver = socket.AF_INET
else:
# host is a hostname, trying to resolve to an ip address (v4 or v6)
self.logger.debug("{} is not a valid IP address, trying to resolve it as hostname".format(host))
try:
self._ipver, sockettype, proto, canonname, socketaddr = socket.getaddrinfo(host, None)[0]
# Check if resolved address is IPv4 or IPv6
if self._ipver == socket.AF_INET: # is IPv4
self._hostip, port = socketaddr
elif self._ipver == socket.AF_INET6: # is IPv6
self._hostip, port, flow_info, scope_id = socketaddr
else:
# This should never happen
self.logger.error("Unknown ip address family {}".format(self._ipver))
self._hostip = None
# Print ip address on successfull resolve
if self._hostip is not None:
self.logger.info("Resolved {} to {} address {}".format(self._host, 'IPv6' if self._ipver == socket.AF_INET6 else 'IPv4', self._hostip))
except:
# Unable to resolve hostname
self.logger.error("Cannot resolve {} to a valid ip address (v4 or v6)".format(self._host))
self._hostip = None
def set_callbacks(self, connected=None, data_received=None, disconnected=None):
""" Set callbacks to caller for different socket events
:param connected: Called whenever a connection is established successfully
:param data_received: Called when data is received
:param disconnected: Called when a connection has been dropped for whatever reason
:type connected: function
:type data_received: function
:type disconnected: function
"""
self._connected_callback = connected
self._disconnected_callback = disconnected
self._data_received_callback = data_received
def connect(self):
""" Connects the socket
:return: False if an error prevented us from launching a connection thread. True if a connection thread has been started.
:rtype: bool
"""
if self._hostip is None: # return False if no valid ip to connect to
self.logger.error("No valid IP address to connect to {}".format(self._host))
self._is_connected = False
return False
if self._is_connected: # return false if already connected
self.logger.error("Already connected to {}, ignoring new request".format(self._host))
return False
self.__connect_thread = threading.Thread(target=self._connect_thread_worker, name='TCP_Connect')
self.__connect_thread.daemon = True
self.__connect_thread.start()
return True
def connected(self):
""" Returns the current connection state
:return: True if an active connection exists,else False.
:rtype: bool
"""
return self._is_connected
def send(self, message):
""" Sends a message to the server. Can be a string, bytes or a bytes array.
:return: True if message has been successfully sent, else False.
:rtype: bool
"""
if not isinstance(message, (bytes, bytearray)):
try:
message = message.encode('utf-8')
except:
self.logger.warning("Error encoding message for client {}".format(self.name))
return False
try:
if self._is_connected:
self._socket.send(message)
else:
return False
except:
self.logger.warning("No connection to {}, cannot send data {}".format(self._host, msg))
return False
return True
def _connect_thread_worker(self):
if not self.__connect_threadlock.acquire(blocking=False):
self.logger.warning("Connection attempt already in progress for {}, ignoring new request".format(self._host))
return
if self._is_connected:
self.logger.error("Already connected to {}, ignoring new request".format(self._host))
return
self.logger.debug("Starting connection cycle for {}".format(self._host))
self._connect_counter = 0
while self.__running and not self._is_connected:
# Try a full connect cycle
while not self._is_connected and self._connect_counter < self._connect_retries and self.__running:
self._connect()
if self._is_connected:
try:
self.__connect_threadlock.release()
self._connected_callback and self._connected_callback(self)
self.__receive_thread = threading.Thread(target=self.__receive_thread_worker, name='TCP_Receive')
self.__receive_thread.daemon = True
self.__receive_thread.start()
except:
raise
return True
self._sleep(self._connect_cycle)
if self._autoreconnect:
self._sleep(self._retry_cycle)
self._connect_counter = 0
else:
break
try:
self.__connect_threadlock.release()
except:
pass
def _connect(self):
self.logger.debug("Connecting to {} using {} {} on TCP port {} {} autoreconnect".format(self._host, 'IPv6' if self._ipver == socket.AF_INET6 else 'IPv4', self._hostip, self._port, ('with' if self._autoreconnect else 'without')))
# Try to connect to remote host using ip (v4 or v6)
try:
self._socket = socket.socket(self._ipver, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self._socket.settimeout(5)
self._socket.connect(('{}'.format(self._hostip), int(self._port)))
self._socket.settimeout(self._timeout)
self._is_connected = True
self.logger.info("Connected to {} on TCP port {}".format(self._host, self._port))
# Connection error
except Exception as err:
self._is_connected = False
self._connect_counter += 1
self.logger.warning("TCP connection to {}:{} failed with error {}. Counter: {}/{}".format(self._host, self._port, err, self._connect_counter, self._connect_retries))
def __receive_thread_worker(self):
poller = select.poll()
poller.register(self._socket, select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR)
__buffer = b''
while self._is_connected and self.__running:
events = poller.poll(1000)
for fd, event in events:
if event & select.POLLHUP:
self.logger.warning("Client socket closed")
# Check if POLLIN event triggered
if event & (select.POLLIN | select.POLLPRI):
msg = self._socket.recv(4096)
# Check if incoming message is not empty
if msg:
# If we transfer in text mode decode message to string
if not self._binary:
msg = str.rstrip(str(msg, 'utf-8'))
# If we work in line mode (with a terminator) slice buffer into single chunks based on terminator
if self.terminator:
__buffer += msg
while True:
# terminator = int means fixed size chunks
if isinstance(self.terminator, int):
i = self.terminator
if i > len(__buffer):
break
# terminator is str or bytes means search for it
else:
i = __buffer.find(self.terminator)
if i == -1:
break
i += len(self.terminator)
line = __buffer[:i]
__buffer = __buffer[i:]
if self._data_received_callback is not None:
self._data_received_callback(self, line)
# If not in terminator mode just forward what we received
else:
if self._data_received_callback is not None:
self._data_received_callback(self, msg)
# If empty peer has closed the connection
else:
# Peer connection closed
self.logger.warning("Connection closed by peer {}".format(self._host))
self._is_connected = False
poller.unregister(self._socket)
self._disconnected_callback and self._disconnected_callback(self)
if self._autoreconnect:
self.logger.debug("Autoreconnect enabled for {}".format(self._host))
self.connect()
def _sleep(self, time_lapse):
time_start = time.time()
time_end = (time_start + time_lapse)
while self.__running and time_end > time.time():
pass
def close(self):
""" Closes the current client socket """
self.logger.info("Closing connection to {} on TCP port {}".format(self._host, self._port))
self.__running = False
if self.__connect_thread is not None and self.__connect_thread.isAlive():
self.__connect_thread.join()
if self.__receive_thread is not None and self.__receive_thread.isAlive():
self.__receive_thread.join()
class _Client(object):
""" Client object that represents a connected client of tcp_server
:param server: The tcp_server passes a reference to itself to access parent methods
:param socket: socket.Socket class used by the Client object
:param fd: File descriptor of socket used by the Client object
:type server: tcp_server
:type socket: function
:type fd: int
"""
def __init__(self, server=None, socket=None, fd=None):
self.logger = logging.getLogger(__name__)
self.name = None
self.ip = None
self.port = None
self.ipver = None
self._message_queue = queue.Queue()
self._data_received_callback = None
self._will_close_callback = None
self._fd = fd
self.__server = server
self.__socket = socket
@property
def socket(self):
return self.__socket
@property
def fd(self):
return self._fd
def set_callbacks(self, data_received=None, will_close=None):
""" Set callbacks for different socket events (client based)
:param data_received: Called when data is received
:type data_received: function
"""
self._data_received_callback = data_received
self._will_close_callback = will_close
def send(self, message):
""" Send a string to connected client
:param msg: Message to send
:type msg: string | bytes | bytearray
:return: True if message has been queued successfully.
:rtype: bool
"""
if not isinstance(message, (bytes, bytearray)):
try:
message = message.encode('utf-8')
except:
self.logger.warning("Error encoding message for client {}".format(self.name))
return False
try:
self._message_queue.put_nowait(message)
except:
self.logger.warning("Error queueing message for client {}".format(self.name))
return False
return True
def send_echo_off(self):
""" Sends an IAC telnet command to ask client to turn it's echo off """
command = bytearray([0xFF, 0xFB, 0x01])
string = self._iac_to_string(command)
self.logger.debug("Sending IAC telnet command: '{}'".format(string))
self.send(command)
def send_echo_on(self):
""" Sends an IAC telnet command to ask client to turn it's echo on again """
command = bytearray([0xFF, 0xFC, 0x01])
string = self._iac_to_string(command)
self.logger.debug("Sending IAC telnet command: '{}'".format(string))
self.send(command)
def process_IAC(self, msg):
""" Processes incomming IAC messages. Does nothing for now except logging them in clear text """
string = self._iac_to_string(msg)
self.logger.debug("Received IAC telnet command: '{}'".format(string))
def close(self):
""" Client socket closes itself """
self._process_queue() # Be sure that possible remaining messages will be processed
self.__socket.shutdown(socket.SHUT_RDWR)
self.logger.info("Closing connection for client {}".format(self.name))
self._will_close_callback and self._will_close_callback(self)
self.set_callbacks(data_received=None, will_close=None)
del self.__message_queue
self.__socket.close()
return True
def _iac_to_string(self, msg):
iac = {1: 'ECHO', 251: 'WILL', 252: 'WON\'T', 253: 'DO', 254: 'DON\'T', 255: 'IAC'}
string = ''
for char in msg:
if char in iac:
string += iac[char] + ' '
else:
string += '<UNKNOWN> '
return string.rstrip()
def _process_queue(self):
while not self._message_queue.empty():
msg = self._message_queue.get_nowait()
try:
string = str(msg, 'utf-8'),
self.logger.debug("Sending '{}' to {}".format(string, self.name))
except:
self.logger.debug("Sending undecodable bytes to {}".format(self.name))
self.__socket.send(msg)
self._message_queue.task_done()
return True
class Tcp_server(object):
""" Creates a new instance of the Tcp_server class
:param interface: Remote interface name or ip address (v4 or v6). Default is '::' which listens on all IPv4 and all IPv6 addresses available.
:param port: Remote interface port to connect to
:param name: Name of this connection (mainly for logging purposes)
:type interface: str
:type port: int
:type name: str
"""
def __init__(self, port, interface='::', name=None):
self.logger = logging.getLogger(__name__)
# Public properties
self.name = name
# "Private" properties
self._interface = interface
self._port = port
self._is_listening = False
self._timeout = 1
self._interfaceip = None
self._ipver = socket.AF_INET
self._socket = None
self._listening_callback = None
self._incoming_connection_callback = None
self._data_received_callback = None
# "Secret" properties
self.__listening_thread = None
self.__listening_threadlock = threading.Lock()
self.__connection_thread = None
self.__connection_threadlock = threading.Lock()
self.__connection_poller = None
self.__message_queues = {}
self.__connection_map = {}
self.__running = True
# Test if host is an ip address or a host name
if Network.is_ip(self._interface):
# host is a valid ip address (v4 or v6)
self.logger.debug("{} is a valid IP address".format(self._interface))
self._interfaceip = self._interface
if Network.is_ipv6(self._interfaceip):
self._ipver = socket.AF_INET6
else:
self._ipver = socket.AF_INET
else:
# host is a hostname, trying to resolve to an ip address (v4 or v6)
self.logger.debug("{} is not a valid IP address, trying to resolve it as hostname".format(self._interface))
try:
self._ipver, sockettype, proto, canonname, socketaddr = socket.getaddrinfo(self._interface, None)[0]
# Check if resolved address is IPv4 or IPv6
if self._ipver == socket.AF_INET:
self._interfaceip, port = socketaddr
elif self._ipver == socket.AF_INET6:
self._interfaceip, port, flow_info, scope_id = socketaddr
else:
self.logger.error("Unknown ip address family {}".format(self._ipver))
self._interfaceip = None
if self._interfaceip is not None:
self.logger.info("Resolved {} to {} address {}".format(self._interface, ipver_to_string(self._ipver), self._hostip))
except:
# Unable to resolve hostname
self.logger.error("Cannot resolve {} to a valid ip address (v4 or v6)".format(self._interface))
self._interfaceip = None
self.__our_socket = Network.ip_port_to_socket(self._interfaceip, self._port)
if not self.name:
self.name = self.__our_socket
self.logger.info("Initializing TCP server socket {}".format(self.__our_socket))
def set_callbacks(self, listening=None, incoming_connection=None, disconnected=None, data_received=None):
""" Set callbacks to caller for different socket events
:param connected: Called whenever a connection is established successfully
:param data_received: Called when data is received
:param disconnected: Called when a connection has been dropped for whatever reason
:type connected: function
:type data_received: function
:type disconnected: function
"""
self._listening_callback = listening
self._incoming_connection_callback = incoming_connection
self._data_received_callback = data_received
self._disconnected_callback = disconnected
def start(self):
""" Start the server socket
:return: False if an error prevented us from launching a connection thread. True if a connection thread has been started.
:rtype: bool
"""
if self._is_listening:
return
try:
self._socket = socket.socket(self._ipver, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind((self._interfaceip, self._port))
except Exception as e:
self.logger.error("Problem binding to interface {} on port {}: {}".format(self._interfaceip, self._port, e))
self._is_listening = False
return False
else:
self.logger.debug("Bound listening socket to interface {} on port {}".format(self._interfaceip, self._port))
try:
self._socket.listen(5)
self._socket.setblocking(0)
self.logger.info("Listening on socket {}".format(self.__our_socket))
except Exception as e:
self.logger.error("Problem starting listening socket on interface {} port {}: {}".format(self._interfaceip, self._port, e))
self._is_listening = False
return False
self._is_listening = True
self._listening_callback and self._listening_callback(self)
self.__listening_thread = threading.Thread(target=self.__listening_thread_worker, name='TCP_Listener')
self.__listening_thread.daemon = True
self.__listening_thread.start()
return True
def __listening_thread_worker(self):
poller = select.poll()
poller.register(self._socket, select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR)
self.logger.debug("Waiting for incomming commections on socket {}".format(self.__our_socket))
while self.__running:
events = poller.poll(1000)
for fd, event in events:
if event & select.POLLERR:
self.logger.debug("Listening thread POLLERR")
if event & select.POLLHUP:
self.logger.debug("Listening thread POLLHUP")
if event & (select.POLLIN | select.POLLPRI):
connection, peer = self._socket.accept()
connection.setblocking(0)
fd = connection.fileno()
__peer_socket = Network.ip_port_to_socket(peer[0], peer[1])
client = _Client(server=self, socket=connection, fd=fd)
client.ip = peer[0]
client.ipver = socket.AF_INET6 if Network.is_ipv6(client.ip) else socket.AF_INET
client.port = peer[1]
client.name = Network.ip_port_to_socket(client.ip, client.port)
self.logger.info("Incoming connection from {} on socket {}".format(__peer_socket, self.__our_socket))
self.__connection_map[fd] = client
self._incoming_connection_callback and self._incoming_connection_callback(self, client)
if self.__connection_thread is None:
self.logger.debug("Connection thread not running yet, firing it up ...")
self.__connection_thread = threading.Thread(target=self.__connection_thread_worker, name='TCP_Server')
if self.__connection_poller is None:
self.__connection_poller = select.poll()
self.__connection_poller.register(connection, select.POLLOUT | select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR)
if not self.__connection_thread.isAlive():
self.__connection_thread.daemon = True
self.__connection_thread.start()
del client
def __connection_thread_worker(self):
""" This thread handles the send & receive tasks of connected clients. """
self.logger.debug("Connection thread on socket {} starting up".format(self.__our_socket))
while self.__running and len(self.__connection_map) > 0:
events = self.__connection_poller.poll(1000)
for fd, event in events:
__client = self.__connection_map[fd]
__socket = __client.socket
if event & select.POLLERR:
self.logger.debug("Connection thread POLLERR")
if event & select.POLLHUP:
self.logger.debug("Connection thread POLLHUP")
if event & select.POLLOUT:
if not __client._message_queue.empty():
__client._process_queue()
if event & (select.POLLIN | select.POLLPRI):
msg = __socket.recv(4096)
if msg:
try:
string = str.rstrip(str(msg, 'utf-8'))
self.logger.debug("Received '{}' from {}".format(string, __client.name))
self._data_received_callback and self._data_received_callback(self, __client, string)
__client._data_received_callback and __client._data_received_callback(self, __client, string)
except:
self.logger.debug("Received undecodable bytes from {}".format(__client.name))
if msg[0] == 0xFF:
__client.process_IAC(msg)
else:
self._remove_client(__client)
del __socket
del __client
self.__connection_poller = None
self.__connection_thread = None
self.logger.debug("Last connection closed for socket {}, stopping connection thread".format(self.__our_socket))
def listening(self):
""" Returns the current listening state
:return: True if the server socket is actually listening, else False.
:rtype: bool
"""
return self._is_listening
def send(self, client, msg):
""" Send a string to connected client
:param client: Client Object to send message to
:param msg: Message to send
:type client: network.Client
:type msg: string | bytes | bytearray
:return: True if message has been queued successfully.
:rtype: bool
"""
return client.send(msg)
def disconnect(self, client):
""" Disconnects a specific client
:param client: Client Object to disconnect
:type client: network.Client
"""
client.close()
return True
def _remove_client(self, client):
self.logger.info("Lost connection to client {}, removing it".format(client.name))
self._disconnected_callback and self._disconnected_callback(self, client)
self.__connection_poller.unregister(client.fd)
del self.__connection_map[client.fd]
return True
def _sleep(self, time_lapse):
""" Non blocking sleep. Does return when self.close is called and running set to False.
:param time_lapse: Time in seconds to sleep
:type time_lapse: float
"""
time_start = time.time()
time_end = (time_start + time_lapse)
while self.__running and time_end > time.time():
pass
def close(self):
""" Closes running listening socket """
self.logger.info("Shutting down listening socket on interface {} port {}".format(self._interface, self._port))
self.__running = False
if self.__listening_thread is not None and self.__listening_thread.isAlive():
self.__listening_thread.join()
if self.__connection_thread is not None and self.__connection_thread.isAlive():
self.__connection_thread.join()
| Foxi352/netlib | network.py | Python | gpl-3.0 | 39,974 |
# -*- coding: utf-8 -*-
# This file is part of Dyko
# Copyright © 2008-2010 Kozea
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kalamar. If not, see <http://www.gnu.org/licenses/>.
"""
Site
====
Site class. Create one for each independent site with its own configuration.
"""
import logging
from .request import normalize, make_request, And, Condition, Or, Not
from .query import QueryFilter, QuerySelect, QueryChain, QueryOrder, QueryRange,\
QueryDistinct, QueryAggregate
from .access_point import DEFAULT_PARAMETER
def _translate_request(request, aliases):
"""Translate high-level ``request`` to low-level using ``aliases``."""
if isinstance(request, And):
return And(*(_translate_request(req, aliases)
for req in request.sub_requests))
elif isinstance(request, Or):
return Or(*(_translate_request(req, aliases)
for req in request.sub_requests))
elif isinstance(request, Not):
return Not(_translate_request(request.sub_request, aliases))
elif isinstance(request, Condition):
name = repr(request.property)
if name in aliases:
# The complete path has already been selected,
# Let's use the alias instead !
new_name = aliases.get(name, name)
request.property.name = new_name
request.property.child_property = None
return request
elif name in aliases.values():
return request
elif ".".join(name.split(".")[:-1] + ["*"]) in aliases:
return request
else:
new_name = "__%s" % name.replace(".", "_")
aliases[name] = new_name
request.property.name = new_name
request.property.child_property = None
return request
def _delegate_to_acces_point(method_name, first_arg_is_a_request=False):
"""Create a function delegating ``method_name`` to an access point."""
if first_arg_is_a_request:
def wrapper(self, access_point_name, request=None, *args, **kwargs):
"""Call ``access_point.method_name(request, *args, **kwargs)``."""
access_point = self.access_points[access_point_name]
request = normalize(access_point.properties, request)
return getattr(access_point, method_name)(request, *args, **kwargs)
else:
def wrapper(self, access_point_name, *args, **kwargs):
"""Call ``access_point.method_name(*args, **kwargs)``."""
access_point = self.access_points[access_point_name]
return getattr(access_point, method_name)(*args, **kwargs)
# Redefining documentation and name of the wrappers
# pylint: disable=W0622
wrapper.__name__ = method_name
wrapper.__doc__ = \
"Call :meth:`kalamar.access_point.AccessPoint.%s`." % method_name
# pylint: enable=W0622
return wrapper
class Site(object):
"""Kalamar site."""
def __init__(self):
self.access_points = {}
self.logger = logging.getLogger("dyko")
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
self.logger.addHandler(NullHandler())
def register(self, name, access_point):
"""Add an access point to this site.
:param name: Identifier string of the added access point.
:param access_point: Concrete subclass of :class:`AccessPoint`.
"""
if name in self.access_points:
raise RuntimeError(
"Site already has an access point named %r." % name)
self.access_points[name] = access_point
access_point.bind(self, name)
def view(self, access_point_name, aliases=None, request=None, order_by=None,
select_range=None, distinct=False, aggregate=None, query=None):
"""Call :meth:`kalamar.access_point.AccessPoint.view`.
If ``alias`` and ``request`` are given, a query is created from them.
The query is then validated and then passed to the ``view`` method of
the acess point called ``access_point_name``.
"""
access_point = self.access_points[access_point_name]
if aliases is None:
aliases = {"": "*"}
if query is None:
# Add dummy selects to be able to filter on those
chain = []
aliases = dict(((value, key) for key, value in aliases.items()))
request = make_request(request)
request = _translate_request(request, aliases)
aliases = dict(((value, key) for key, value in aliases.items()))
chain.append(QuerySelect(aliases))
chain.append(QueryFilter(request))
if distinct:
chain.append(QueryDistinct())
if order_by is not None:
chain.append(QueryOrder(order_by))
if aggregate is not None:
chain.append(QueryAggregate(aggregate))
if select_range is not None:
if hasattr(select_range, "__iter__"):
select_range = slice(*select_range)
else:
select_range = slice(select_range)
chain.append(QueryRange(select_range))
query = QueryChain(chain)
query.validate(access_point.properties)
for line in access_point.view(query):
for prop_name in [name for name in line if name.startswith("__")]:
line.pop(prop_name)
yield line
def from_repr(self, access_point_name, repr, default=DEFAULT_PARAMETER):
"""
Return an item of ``access_point_name`` from the ``repr`` string.
``repr`` should have been generated with item.__repr__()
"""
access_point = self.access_points[access_point_name]
return access_point.loader_from_reference_repr(repr)(None)[0]
create = _delegate_to_acces_point("create")
delete = _delegate_to_acces_point("delete")
delete_many = _delegate_to_acces_point("delete_many", True)
open = _delegate_to_acces_point("open", True)
search = _delegate_to_acces_point("search", True)
save = _delegate_to_acces_point("save")
| Kozea/Dyko | kalamar/site.py | Python | gpl-3.0 | 6,799 |
from logging import Logger
from requests.sessions import Session
def getpocket_download(session: Session, _logger: Logger):
"""
This does the heavy lifting
:param session:
:param _logger:
:return:
"""
headers = {
"Origin": "https://app.getpocket.com", # checked that this is needed
}
params = {
"enable_cors": "1", # checked that this is needed
"consumer_key": "78809-9423d8c743a58f62b23ee85c", # checked that this is needed
}
url = "https://getpocket.com/v3/get"
response = session.post(url=url, headers=headers, params=params)
response.raise_for_status()
obj = response.json()
print(obj)
| veltzer/scrapers | pyscrapers/workers/getpocket.py | Python | gpl-3.0 | 679 |
from setuptools import setup, find_packages
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError: # for pip <= 9.0.3
from pip.req import parse_requirements
import re, ast
# get version from __version__ variable in bench/__init__.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('bench/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
requirements = parse_requirements("requirements.txt", session="")
setup(
name='bench',
description='Metadata driven, full-stack web framework',
author='Frappe Technologies',
author_email='[email protected]',
version=version,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=[str(ir.req) for ir in requirements],
dependency_links=[str(ir._link) for ir in requirements if ir._link],
entry_points='''
[console_scripts]
bench=bench.cli:cli
''',
)
| bailabs/bench-v7 | setup.py | Python | gpl-3.0 | 963 |
#!/usr/bin/env python
from os import path
from collections import defaultdict
import math
root = path.dirname(path.dirname(path.dirname(__file__)))
result_dir = path.join(root, 'results')
def get_file_name(test):
test = '%s_result' % test
return path.join(result_dir, test)
def mean(l):
return float(sum(l))/len(l) if len(l) > 0 else float('nan')
def std_dev(l):
m = mean(l)
return math.sqrt(sum((x - m) ** 2 for x in l) / len(l))
def run_timing_overhead_ana():
test_name = 'timing_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(int(l))
datas = [i for i in datas[:10000]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_loop_overhead_ana():
test_name = 'loop_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(float(l.split(' ')[0]))
datas = [i for i in datas[:10000]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_proc_call_overhead_ana():
test_name = 'proc_call_overhead'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
if l.startswith('-'):
datas.append([])
continue
datas[-1].append(int(l.split(' ')[0]) * 1.0 / 10)
print "%s result:" % test_name
for i, data in enumerate(datas):
m = mean(data)
std = std_dev(data)
print "%f\t%f" % (m, std)
#print "%s %d mean: %f" % (test_name, i, mean(data))
#print "%s %d std dev: %f" % (test_name, i, std_dev(data))
def run_process_context_switch_ana():
test_name = 'process_context_switch'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
try:
datas.append(int(l.split(' ')[1]))
except:
pass
datas = [i for i in datas[:100]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_thread_context_switch_ana():
test_name = 'thread_context_switch'
file_name = get_file_name(test_name)
datas = []
with open(file_name) as f:
for l in f:
datas.append(int(l.split(' ')[1]))
datas = [i for i in datas[:100]]
print "%s mean: %f" % (test_name, mean(datas))
print "%s std dev: %f" % (test_name, std_dev(datas))
def run_mem_acc_ana():
test_name = 'mem_acc'
filename = get_file_name(test_name)
datas = defaultdict(lambda: defaultdict(list))
with open(filename) as f:
for l in f:
ll = l.split(' ')
step = int(ll[7])
offset = int(ll[1])
cycle = float(ll[3])
datas[step][offset].append(cycle)
results = {}
offsets = set()
for step, v in sorted(datas.items()):
result = []
for offset, cycles in sorted(v.items()):
offsets.add(offset)
m = mean(cycles)
result.append(m)
results[step] = (result)
print "mem access time result"
fl = "step/offset\t%s" % "\t".join(str(i) for i in sorted(offsets))
print fl
for step, means in sorted(results.items()):
line = "\t".join(str(i) for i in means)
line = "%s\t%s" % (str(step), line)
print line
if __name__ == '__main__':
run_timing_overhead_ana()
run_loop_overhead_ana()
run_proc_call_overhead_ana()
run_process_context_switch_ana()
run_thread_context_switch_ana()
run_mem_acc_ana()
| sheimi/os-benchmark | script/analysis/analysis.py | Python | gpl-3.0 | 3,736 |
# This file is part of pylabels, a Python library to create PDFs for printing
# labels.
# Copyright (C) 2012, 2013, 2014, 2015 Blair Bonnett
#
# pylabels is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pylabels is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pylabels. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
import json
class InvalidDimension(ValueError):
"""Raised when a sheet specification has inconsistent dimensions. """
pass
class Specification(object):
"""Specification for a sheet of labels.
All dimensions are given in millimetres. If any of the margins are not
given, then any remaining space is divided equally amongst them. If all the
width or all the height margins are given, they must exactly use up all
non-label space on the sheet.
"""
def __init__(self, sheet_width, sheet_height, columns, rows, label_width, label_height, **kwargs):
"""
Required parameters
-------------------
sheet_width, sheet_height: positive dimension
The size of the sheet.
columns, rows: positive integer
The number of labels on the sheet.
label_width, label_size: positive dimension
The size of each label.
Margins and gaps
----------------
left_margin: positive dimension
The gap between the left edge of the sheet and the first column.
column_gap: positive dimension
The internal gap between columns.
right_margin: positive dimension
The gap between the right edge of the sheet and the last column.
top_margin: positive dimension
The gap between the top edge of the sheet and the first row.
row_gap: positive dimension
The internal gap between rows.
bottom_margin: positive dimension
The gap between the bottom edge of the sheet and the last row.
Padding
-------
left_padding, right_padding, top_padding, bottom_padding: positive dimensions, default 0
The padding between the edges of the label and the area available
to draw on.
Corners
---------------------
corner_radius: positive dimension, default 0
Gives the labels rounded corners with the given radius.
padding_radius: positive dimension, default 0
Give the drawing area rounded corners. If there is no padding, this
must be set to zero.
Background
----------
background_image: reportlab.graphics.shape.Image
An image to use as the background to the page. This will be
automatically sized to fit the page; make sure it has the correct
aspect ratio.
background_filename: string
Filename of an image to use as a background to the page. If both
this and background_image are given, then background_image will
take precedence.
Raises
------
InvalidDimension
If any given dimension is invalid (i.e., the labels cannot fit on
the sheet).
"""
# Compulsory arguments.
self._sheet_width = Decimal(sheet_width)
self._sheet_height = Decimal(sheet_height)
self._columns = int(columns)
self._rows = int(rows)
self._label_width = Decimal(label_width)
self._label_height = Decimal(label_height)
# Optional arguments; missing ones will be computed later.
self._left_margin = kwargs.pop('left_margin', None)
self._column_gap = kwargs.pop('column_gap', None)
self._right_margin = kwargs.pop('right_margin', None)
self._top_margin = kwargs.pop('top_margin', None)
self._row_gap = kwargs.pop('row_gap', None)
self._bottom_margin = kwargs.pop('bottom_margin', None)
# Optional arguments with default values.
self._left_padding = kwargs.pop('left_padding', 0)
self._right_padding = kwargs.pop('right_padding', 0)
self._top_padding = kwargs.pop('top_padding', 0)
self._bottom_padding = kwargs.pop('bottom_padding', 0)
self._corner_radius = Decimal(kwargs.pop('corner_radius', 0))
self._padding_radius = Decimal(kwargs.pop('padding_radius', 0))
self._background_image = kwargs.pop('background_image', None)
self._background_filename = kwargs.pop('background_filename', None)
# Leftover arguments.
if kwargs:
args = kwargs.keys()
if len(args) == 1:
raise TypeError("Unknown keyword argument {}.".format(args[0]))
else:
raise TypeError("Unknown keyword arguments: {}.".format(', '.join(args)))
# Track which attributes have been automatically set.
self._autoset = set()
# Check all the dimensions etc are valid.
self._calculate()
def _calculate(self):
"""Checks the dimensions of the sheet are valid and consistent.
NB: this is called internally when needed; there should be no need for
user code to call it.
"""
# Check the dimensions are larger than zero.
for dimension in ('_sheet_width', '_sheet_height', '_columns', '_rows', '_label_width', '_label_height'):
if getattr(self, dimension) <= 0:
name = dimension.replace('_', ' ').strip().capitalize()
raise InvalidDimension("{0:s} must be greater than zero.".format(name))
# Check margins / gaps are not smaller than zero if given.
# At the same time, force the values to decimals.
for margin in ('_left_margin', '_column_gap', '_right_margin', '_top_margin', '_row_gap', '_bottom_margin',
'_left_padding', '_right_padding', '_top_padding', '_bottom_padding'):
val = getattr(self, margin)
if val is not None:
if margin in self._autoset:
val = None
else:
val = Decimal(val)
if val < 0:
name = margin.replace('_', ' ').strip().capitalize()
raise InvalidDimension("{0:s} cannot be less than zero.".format(name))
setattr(self, margin, val)
else:
self._autoset.add(margin)
# Check the corner radius.
if self._corner_radius < 0:
raise InvalidDimension("Corner radius cannot be less than zero.")
if self._corner_radius > (self._label_width / 2):
raise InvalidDimension("Corner radius cannot be more than half the label width.")
if self._corner_radius > (self._label_height / 2):
raise InvalidDimension("Corner radius cannot be more than half the label height.")
# If there is no padding, we don't need the padding radius.
if (self._left_padding + self._right_padding + self._top_padding + self._bottom_padding) == 0:
if self._padding_radius != 0:
raise InvalidDimension("Padding radius must be zero if there is no padding.")
else:
if (self._left_padding + self._right_padding) >= self._label_width:
raise InvalidDimension("Sum of horizontal padding must be less than the label width.")
if (self._top_padding + self._bottom_padding) >= self._label_height:
raise InvalidDimension("Sum of vertical padding must be less than the label height.")
if self._padding_radius < 0:
raise InvalidDimension("Padding radius cannot be less than zero.")
# Calculate the amount of spare space.
hspace = self._sheet_width - (self._label_width * self._columns)
vspace = self._sheet_height - (self._label_height * self._rows)
# Cannot fit.
if hspace < 0:
raise InvalidDimension("Labels are too wide to fit on the sheet.")
if vspace < 0:
raise InvalidDimension("Labels are too tall to fit on the sheet.")
# Process the horizontal margins / gaps.
hcount = 1 + self._columns
if self._left_margin is not None:
hspace -= self._left_margin
if hspace < 0:
raise InvalidDimension("Left margin is too wide for the labels to fit on the sheet.")
hcount -= 1
if self._column_gap is not None:
hspace -= ((self._columns - 1) * self._column_gap)
if hspace < 0:
raise InvalidDimension("Column gap is too wide for the labels to fit on the sheet.")
hcount -= (self._columns - 1)
if self._right_margin is not None:
hspace -= self._right_margin
if hspace < 0.01 and hspace > -0.01:
self._right_margin += hspace
hspace = 0
if hspace < 0:
raise InvalidDimension("Right margin is too wide for the labels to fit on the sheet.")
hcount -= 1
# Process the vertical margins / gaps.
vcount = 1 + self._rows
if self._top_margin is not None:
vspace -= self._top_margin
if vspace < 0:
raise InvalidDimension("Top margin is too tall for the labels to fit on the sheet.")
vcount -= 1
if self._row_gap is not None:
vspace -= ((self._rows - 1) * self._row_gap)
if vspace < 0:
raise InvalidDimension("Row gap is too tall for the labels to fit on the sheet.")
vcount -= (self._rows - 1)
if self._bottom_margin is not None:
vspace -= self._bottom_margin
if vspace < 0.01 and vspace > -0.01:
self._bottom_margin += vspace
vspace = 0
if vspace < 0:
raise InvalidDimension("Bottom margin is too tall for the labels to fit on the sheet.")
vcount -= 1
# If all the margins are specified, they must use up all available space.
if hcount == 0 and hspace != 0:
raise InvalidDimension("Not all width used by manually specified margins/gaps; {}mm left.".format(hspace))
if vcount == 0 and vspace != 0:
raise InvalidDimension("Not all height used by manually specified margins/gaps; {}mm left.".format(vspace))
# Split any extra horizontal space and allocate it.
if hcount:
auto_margin = hspace / hcount
for margin in ('_left_margin', '_column_gap', '_right_margin'):
if getattr(self, margin) is None:
setattr(self, margin, auto_margin)
# And allocate any extra vertical space.
if vcount:
auto_margin = vspace / vcount
for margin in ('_top_margin', '_row_gap', '_bottom_margin'):
if getattr(self, margin) is None:
setattr(self, margin, auto_margin)
def bounding_boxes(self, mode='fraction', output='dict'):
"""Get the bounding boxes of the labels on a page.
Parameters
----------
mode: 'fraction', 'actual'
If 'fraction', the bounding boxes are expressed as a fraction of the
height and width of the sheet. If 'actual', they are the actual
position of the labels in millimetres from the top-left of the
sheet.
output: 'dict', 'json'
If 'dict', a dictionary with label identifier tuples (row, column)
as keys and a dictionary with 'left', 'right', 'top', and 'bottom'
entries as the values.
If 'json', a JSON encoded string which represents a dictionary with
keys of the string format 'rowxcolumn' and each value being a
bounding box dictionary with 'left', 'right', 'top', and 'bottom'
entries.
Returns
-------
The bounding boxes in the format set by the output parameter.
"""
boxes = {}
# Check the parameters.
if mode not in ('fraction', 'actual'):
raise ValueError("Unknown mode {0}.".format(mode))
if output not in ('dict', 'json'):
raise ValueError("Unknown output {0}.".format(output))
# Iterate over the rows.
for row in range(1, self.rows + 1):
# Top and bottom of all labels in the row.
top = self.top_margin + ((row - 1) * (self.label_height + self.row_gap))
bottom = top + self.label_height
# Now iterate over all columns in this row.
for column in range(1, self.columns + 1):
# Left and right position of this column.
left = self.left_margin + ((column - 1) * (self.label_width + self.column_gap))
right = left + self.label_width
# Output in the appropriate mode format.
if mode == 'fraction':
box = {
'top': top / self.sheet_height,
'bottom': bottom / self.sheet_height,
'left': left / self.sheet_width,
'right': right / self.sheet_width,
}
elif mode == 'actual':
box = {'top': top, 'bottom': bottom, 'left': left, 'right': right}
# Add to the collection.
if output == 'json':
boxes['{0:d}x{1:d}'.format(row, column)] = box
box['top'] = float(box['top'])
box['bottom'] = float(box['bottom'])
box['left'] = float(box['left'])
box['right'] = float(box['right'])
else:
boxes[(row, column)] = box
# Done.
if output == 'json':
return json.dumps(boxes)
return boxes
# Helper function to create an accessor for one of the properties.
# attr is the 'internal' attribute e.g., _sheet_width.
def create_accessor(attr, deletable=False):
# Getter is simple; no processing needed.
@property
def accessor(self):
return getattr(self, attr)
# Setter is more complicated.
@accessor.setter
def accessor(self, value):
# Store the original value in case we need to reset.
original = getattr(self, attr)
# If this was originally autoset or not.
was_autoset = attr in self._autoset
# Discard this attribute from the autoset list.
self._autoset.discard(attr)
# Set the value and see if it is valid.
setattr(self, attr, value)
try:
self._calculate()
except:
# Reset to the original state.
setattr(self, attr, original)
if was_autoset:
self._autoset.add(attr)
# Let the error propogate up.
raise
# Create a deleter if allowable.
if deletable:
@accessor.deleter
def accessor(self):
self._autoset.add(attr)
setattr(self, attr, None)
self._calculate()
# And we now have our accessor.
return accessor
# Create accessors for all our properties.
sheet_width = create_accessor('_sheet_width')
sheet_height = create_accessor('_sheet_height')
label_width = create_accessor('_label_width')
label_height = create_accessor('_label_height')
columns = create_accessor('_columns')
rows = create_accessor('_rows')
left_margin = create_accessor('_left_margin', deletable=True)
column_gap = create_accessor('_column_gap', deletable=True)
right_margin = create_accessor('_right_margin', deletable=True)
top_margin = create_accessor('_top_margin', deletable=True)
row_gap = create_accessor('_row_gap', deletable=True)
bottom_margin = create_accessor('_bottom_margin', deletable=True)
corner_radius = create_accessor('_corner_radius')
padding_radius = create_accessor('_padding_radius')
background_image = create_accessor('_background_image', deletable=True)
background_filename = create_accessor('_background_filename', deletable=True)
left_padding = create_accessor('_left_padding', deletable=True)
right_padding = create_accessor('_right_padding', deletable=True)
top_padding = create_accessor('_top_padding', deletable=True)
bottom_padding = create_accessor('_bottom_padding', deletable=True)
# Don't need the helper function any more.
del create_accessor
| bcbnz/pylabels | labels/specifications.py | Python | gpl-3.0 | 17,105 |
import os.path
import unittest
from unittest.mock import patch
import libpipe
from libpipe.cmds.align import HisatCmd
import logging
log = logging.getLogger(__name__)
class TestHistatCmd(unittest.TestCase):
def setUp(self):
# prevent error logs from occuring during testing
patcher = patch.object(libpipe.cmds.base.log, 'error')
patcher.start()
self.addCleanup(patcher.stop)
# override base cmd method
patcher = patch.object(libpipe.cmds.base.BaseCmd, '_cmd')
patcher.start()
self.addCleanup(patcher.stop)
def sample_cmd(self):
kw = {
'-U': 'upath/seq.fa',
'-x': 'gpath/gen',
'timestamp': '000',
'-S': 'path/al.sam',
}
return HisatCmd(**kw)
#
# Test _prepcmd
#
def test_prepcmd_sets_S_if_not_given(self):
hc = self.sample_cmd()
del hc.kwargs['-S']
hc._prepcmd()
self.assertEqual(
hc.kwargs['-S'],
'upath/seq_gen.sam',
)
def test_prepcmd_sets_redirect_to_log_file(self):
hc = self.sample_cmd()
hc._prepcmd()
self.assertTrue(
hc.redirect.endswith('path/al_gen_000_hisat.log'),
'Redirect not set to expected log file ({})'.format(hc.redirect),
)
def test_prepcmd_sets_redirect_for_stdout_and_stderr_to_tee(self):
hc = self.sample_cmd()
hc._prepcmd()
self.assertTrue(
hc.redirect.startswith('2>&1 | tee -a'),
'Redirect not set properly: {}'.format(hc.redirect),
)
def test_prepcmd_sets_unal_based_on_given_samfile_name(self):
hc = self.sample_cmd()
hc._prepcmd()
expected_file = os.path.splitext(hc.kwargs['-S'])[0] + '.unal.fastq'
self.assertIn('--un', hc.kwargs)
self.assertEqual(hc.kwargs['--un'], expected_file)
#
# Test cmd
#
def test_cmd_raises_AttributeError_if_only_one_ppe_given(self):
hc = self.sample_cmd()
hc.kwargs['-1'] = hc.kwargs['-U']
del hc.kwargs['-U']
with self.assertRaises(AttributeError):
hc.cmd()
def test_addreq_raises_FileNotFoundError_if_n_idx_ne_expected(self):
with patch('remsci.lib.utility.path.walk_file') as m:
for i in [0, 100]:
with self.subTest(n_indx=i):
m.return_value = [0] * i
hc = self.sample_cmd()
with self.assertRaises(FileNotFoundError):
hc._additional_requirements()
#
# Test _prepreq
#
def test_prepreq_raises_TypeError_if_linked_input_not_used(self):
with patch.object(
HisatCmd, 'output', autospec=True, return_value=['seq.txt']):
ohc = self.sample_cmd()
ihc = self.sample_cmd()
ohc.link(ihc)
with self.assertRaises(TypeError):
ihc._prepreq()
def test_prepreq_sets_single_link_input_to_U_kwarg(self):
with patch.object(HisatCmd, 'output', return_value=['seq.fq']):
ohc = self.sample_cmd()
ihc = self.sample_cmd()
ohc.link(ihc)
ihc._prepreq()
self.assertEqual(ihc.kwargs['-U'], 'seq.fq')
def test_prepreq_sets_double_link_input_to_1_and_2_kwarg(self):
args = ['seq.1.fq', 'seq.2.fq']
with patch.object(HisatCmd, 'output', return_value=args):
ohc = self.sample_cmd()
ihc = self.sample_cmd()
ohc.link(ihc)
ihc._prepreq()
self.assertEqual(ihc.kwargs['-1'], 'seq.1.fq')
self.assertEqual(ihc.kwargs['-2'], 'seq.2.fq')
def test_prepreq_preserves_kwargs_if_no_input_given(self):
ihc = self.sample_cmd()
ihc._prepreq()
self.assertEqual(ihc.kwargs['-U'], 'upath/seq.fa')
if __name__ == '__main__':
unittest.main()
| muppetjones/rempipe | tests/cmds/test_align.py | Python | gpl-3.0 | 3,934 |
#!/usr/bin/python
#This script create simulation and reconstruction options
import os
import sys
import re
if len(sys.argv)<4:
print "Usage: make-sim-options.py <decay_file> <output_prefix> <event_number>"
exit(1)
HOME_DIR = os.environ['HOME']
JPSIKKROOT_DIR = os.environ['JPSIKKROOT']
SHARE_DIR = os.path.join(JPSIKKROOT_DIR, "share")
TEMPLATE_DIR = os.path.join(JPSIKKROOT_DIR, "share/template")
TEMPLATE_SIM_FILE = os.path.joint(TEMPLATE_DIR, "simulation.cfg")
print HOMEDIR, JPSIKKROOT_DIR, TE
DECAY_FILE = os.path.abspath(os.path.join(SHARE_DIR,sys.argv[1]))
PREFIX = sys.argv[2]
RTRAW_FILE = os.path.abspath(PREFIX+".rtraw")
DST_FILE = os.path.abspath(PREFIX+".dst")
ROOT_FILE = os.path.abspath(PREFIX+".root")
| ekherit/JpsiKK | share/make-sim-options.py | Python | gpl-3.0 | 732 |
'''Defines the Special class for theia.'''
# Provides:
# class Special
# __init__
# lines
import numpy as np
from ..helpers import geometry, settings
from ..helpers.units import deg, cm, pi
from .optic import Optic
class Special(Optic):
'''
Special class.
This class represents general optics, as their actions on R and T are left
to the user to input. They are useful for special optics which are neither
reflective nor transmissive.
Actions:
* T on HR: user input
* R on HR: user input
* T on AR: user input
* R on AR: user input
**Note**: by default the actions of these objects are those of
beamsplitters (0, 0, 0, 0)
*=== Additional attributes with respect to the Optic class ===*
None
*=== Name ===*
Special
**Note**: the curvature of any surface is positive for a concave surface
(coating inside the sphere).
Thus kurv*HRNorm/|kurv| always points to the center
of the sphere of the surface, as is the convention for the lineSurfInter of
geometry module. Same for AR.
******* HRK > 0 and ARK > 0 ******* HRK > 0 and ARK < 0
***** ******** and |ARK| > |HRK|
H***A H*********A
***** ********
******* *******
'''
Name = "Special"
def __init__(self, Wedge = 0., Alpha = 0., X = 0., Y = 0., Z = 0.,
Theta = pi/2., Phi = 0., Diameter = 10.e-2,
HRr = .99, HRt = .01, ARr = .1, ARt = .9,
HRK = 0.01, ARK = 0, Thickness = 2.e-2,
N = 1.4585, KeepI = False,
RonHR = 0, TonHR = 0, RonAR = 0, TonAR = 0,
Ref = None):
'''Special optic initializer.
Parameters are the attributes.
Returns a special optic.
'''
# actions
TonHR = int(TonHR)
RonHR = int(RonHR)
TonAR = int(TonAR)
RonAR = int(RonAR)
# Initialize input data
N = float(N)
Wedge = float(Wedge)
Alpha = float(Alpha)
Theta = float(Theta)
Phi = float(Phi)
Diameter = float(Diameter)
Thickness = float(Thickness)
HRK = float(HRK)
ARK = float(ARK)
HRt = float(HRt)
HRr = float(HRr)
ARt = float(ARt)
ARr = float(ARr)
#prepare for mother initializer
HRNorm = np.array([np.sin(Theta)*np.cos(Phi),
np.sin(Theta) * np.sin(Phi),
np.cos(Theta)], dtype = np.float64)
HRCenter = np.array([X, Y, Z], dtype = np.float64)
#Calculate ARCenter and ARNorm with wedge and alpha and thickness:
ARCenter = HRCenter\
- (Thickness + .5 * np.tan(Wedge) * Diameter) * HRNorm
a,b = geometry.basis(HRNorm)
ARNorm = -np.cos(Wedge) * HRNorm\
+ np.sin(Wedge) * (np.cos(Alpha) * a\
+ np.sin(Alpha) * b)
super(Special, self).__init__(ARCenter = ARCenter, ARNorm = ARNorm,
N = N, HRK = HRK, ARK = ARK, ARr = ARr, ARt = ARt, HRr = HRr, HRt = HRt,
KeepI = KeepI, HRCenter = HRCenter, HRNorm = HRNorm,
Thickness = Thickness, Diameter = Diameter,
Wedge = Wedge, Alpha = Alpha,
TonHR = TonHR, RonHR = RonHR, TonAR = TonAR, RonAR = RonAR,
Ref = Ref)
#Warnings for console output
if settings.warning:
self.geoCheck("mirror")
def lines(self):
'''Returns the list of lines necessary to print the object.'''
sph = geometry.rectToSph(self.HRNorm)
return ["Special: %s {" % str(self.Ref),
"TonHR, RonHR: %s, %s" % (str(self.TonHR), str(self.RonHR)),
"TonAR, RonAR: %s, %s" % (str(self.TonAR), str(self.RonAR)),
"Thick: %scm" % str(self.Thick/cm),
"Diameter: %scm" % str(self.Dia/cm),
"Wedge: %sdeg" % str(self.Wedge/deg),
"Alpha: %sdeg" % str(self.Alpha/deg),
"HRCenter: %s" % str(self.HRCenter),
"HRNorm: (%s, %s)deg" % (str(sph[0]/deg), str(sph[1]/deg)),
"Index: %s" %str(self.N),
"HRKurv, ARKurv: %s, %s" % (str(self.HRK), str(self.ARK)),
"HRr, HRt, ARr, ARt: %s, %s, %s, %s" \
% (str(self.HRr), str(self.HRt), str(self.ARr), str(self.ARt)),
"}"]
| bandang0/theia | theia/optics/special.py | Python | gpl-3.0 | 4,456 |
#!/usr/bin/env python
import sys, shutil
try:
from gi.repository import Gtk, Gdk, Vte, GLib, Pango, GConf, GdkPixbuf
import json, os, getpass
from pycm.pycm_globals import *
except ImportError as e:
print "Error during importing of necessaries modules.\nError is '%s'" % e
sys.exit()
python_path = "/usr/lib/python2.7/dist-packages/"
module_path = python_path + 'pycm'
bin_exe = '/usr/bin/pycm.py'
launcher = '/usr/share/applications/pyconnection-manager.desktop'
uid = os.getuid()
def __init__():
if uid > 0:
print "You need to be root to install pyConnection Manager"
sys.exit()
try:
remove_old()
except OSError, IOError:
print "ERROR removing old stuff"
sys.exit()
try:
create_new()
except OSError, IOError:
print "ERROR installing pyConnection Manager"
sys.exit()
ok = "\n\tpyConnection Manager succesfully installed\n"
print ok
def remove_old():
if os.path.exists(module_path):
shutil.rmtree(module_path)
if os.path.exists(GLADE_DIR):
shutil.rmtree(GLADE_DIR)
if os.path.exists(IMAGE_DIR):
shutil.rmtree(IMAGE_DIR)
if os.path.exists(bin_exe):
os.remove(bin_exe)
if os.path.exists(launcher):
os.remove(launcher)
def create_new():
shutil.copytree('pycm', module_path)
shutil.copytree('glade', GLADE_DIR)
shutil.copytree('img', IMAGE_DIR)
shutil.copyfile('pycm.py', '/usr/bin/pycm')
shutil.copyfile('pyconnection-manager.desktop', launcher)
dir_list = [module_path, GLADE_DIR, IMAGE_DIR]
for i in dir_list:
os.chmod(i, 655)
__init__() | maurelio79/pyConnection_Manager | setup.py | Python | gpl-3.0 | 1,662 |
"""
Message Queue wrapper
"""
__RCSID__ = "$Id$"
from DIRAC.FrameworkSystem.private.standardLogging.Handler.MessageQueueHandler import MessageQueueHandler
from DIRAC.Resources.LogBackends.AbstractBackend import AbstractBackend
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.JsonFormatter import JsonFormatter
class MessageQueueBackend(AbstractBackend):
"""
MessageQueueBackend is used to create an abstraction of the handler and the formatter concepts from logging.
Here, we have:
- MessageQueueHandler: which is a custom handler created in DIRAC to send
log records to a Message Queue server. You can find it in: FrameworkSys./private/standardlogging/Handler
- BaseFormatter: is a custom Formatter object, created for DIRAC in order to get the appropriate display.
You can find it in FrameworkSystem/private/standardLogging/Formatter
"""
def __init__(self):
"""
Initialization of the MessageQueueBackend
"""
super(MessageQueueBackend, self).__init__(None, JsonFormatter)
self.__queue = ''
def createHandler(self, parameters=None):
"""
Each backend can initialize its attributes and create its handler with them.
:params parameters: dictionary of parameters. ex: {'FileName': file.log}
"""
if parameters is not None:
self.__queue = parameters.get("MsgQueue", self.__queue)
self._handler = MessageQueueHandler(self.__queue)
def setLevel(self, level):
"""
No possibility to set the level of the MessageQueue handler.
It is not set by default so it can send all Log Records of all levels to the MessageQueue.
"""
pass
| arrabito/DIRAC | Resources/LogBackends/MessageQueueBackend.py | Python | gpl-3.0 | 1,639 |
# Copyright 2008 by Kate Scheppke and Wade Brainerd.
# This file is part of Typing Turtle.
#
# Typing Turtle is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Typing Turtle is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Typing Turtle. If not, see <http://www.gnu.org/licenses/>.
import math
import random, datetime
from gettext import gettext as _
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('PangoCairo', '1.0')
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import Pango
from gi.repository import PangoCairo
import medalscreen
BALLOON_COLORS = [
(65535, 0, 0),
(0, 0, 65535),
(65535, 32768, 0),
(0, 32768, 65535),
]
class Balloon:
def __init__(self, x, y, vx, vy, word):
self.x = x
self.y = y
self.vx = vx
self.vy = vy
self.word = word
self.size = max(100, 50 + len(word) * 20)
self.color = random.choice(BALLOON_COLORS)
class BalloonGame(Gtk.VBox):
def __init__(self, lesson, activity):
GObject.GObject.__init__(self)
self.lesson = lesson
self.activity = activity
# Build title bar.
title = Gtk.Label()
title.set_markup("<span size='20000'><b>" + lesson['name'] + "</b></span>")
title.set_alignment(1.0, 0.0)
stoplabel = Gtk.Label(label=_('Go Back'))
stopbtn = Gtk.Button()
stopbtn.add(stoplabel)
stopbtn.connect('clicked', self.stop_cb)
hbox = Gtk.HBox()
hbox.pack_start(stopbtn, False, False, 10)
hbox.pack_end(title, False, False, 10)
# Build the game drawing area.
self.area = Gtk.DrawingArea()
self.draw_cb_id = self.area.connect("draw", self.draw_cb)
# Connect keyboard grabbing and releasing callbacks.
self.area.connect('realize', self.realize_cb)
self.area.connect('unrealize', self.unrealize_cb)
self.pack_start(hbox, False, False, 10)
self.pack_start(self.area, True, True, 0)
self.show_all()
# Initialize the game data.
self.balloons = []
self.score = 0
self.spawn_delay = 10
self.count = 0
self.count_left = self.lesson.get('length', 60)
self.medal = None
self.finished = False
# Start the animation loop running.
self.update_timer = GObject.timeout_add(20, self.tick, priority=GObject.PRIORITY_HIGH_IDLE+30)
def realize_cb(self, widget):
self.activity.add_events(Gdk.EventMask.KEY_PRESS_MASK)
self.key_press_cb_id = self.activity.connect('key-press-event', self.key_cb)
# Clear the mouse cursor.
#pixmap = Gdk.Pixmap(widget.window, 10, 10)
#color = Gdk.Color()
#cursor = Gdk.Cursor.new(pixmap, pixmap, color, color, 5, 5)
#widget.window.set_cursor(cursor)
def unrealize_cb(self, widget):
self.activity.disconnect(self.key_press_cb_id)
def stop_cb(self, widget):
# Stop the animation loop.
if self.update_timer:
try:
GObject.source_remove(self.update_timer)
except:
pass # Try remove instance, if not found, just pass
self.activity.pop_screen()
def key_cb(self, widget, event):
# Ignore hotkeys.
if event.get_state() & (Gdk.ModifierType.CONTROL_MASK | Gdk.ModifierType.MOD1_MASK):
return False
# Extract information about the key pressed.
key = Gdk.keyval_to_unicode(event.keyval)
if key != 0: key = chr(key)
if self.finished:
key_name = Gdk.keyval_name(event.keyval)
if key_name == 'Return':
self.activity.pop_screen()
# Show the new medal if there was one.
if self.medal:
self.activity.push_screen(medalscreen.MedalScreen(self.medal, self.activity))
else:
for b in self.balloons:
if b.word[0] == key:
b.word = b.word[1:]
self.add_score(1)
# Pop the balloon if it's been typed.
if len(b.word) == 0:
self.balloons.remove(b)
self.add_score(100)
self.queue_draw_balloon(b)
break
return False
def update_balloon(self, b):
b.x += b.vx
b.y += b.vy
if b.x < 100 or b.x >= self.bounds.width - 100:
b.vx = -b.vx
if b.y < -100:
self.balloons.remove(b)
self.queue_draw_balloon(b)
def tick(self):
if self.finished:
return False
self.bounds = self.area.get_allocation()
for b in self.balloons:
self.update_balloon(b)
self.spawn_delay -= 1
if self.count_left >= 0 and self.spawn_delay <= 0:
self.count += 1
self.count_left -= 1
word = random.choice(self.lesson['words'])
x = random.randint(100, self.bounds.width - 100)
y = self.bounds.height + 100
vx = random.uniform(-2, 2)
vy = -2 #random.uniform(-5, -3)
b = Balloon(x, y, vx, vy, word)
self.balloons.append(b)
if self.count < 10:
delay = 200
elif self.count < 20:
delay = 150
else:
delay = 100
self.spawn_delay = random.randint(delay-20, delay+20)
if self.count_left <= 0 and len(self.balloons) == 0:
self.finish_game()
return True
def draw_results(self, cr):
# Draw background.
w = self.bounds.width - 400
h = self.bounds.height - 200
x = self.bounds.width/2 - w/2
y = self.bounds.height/2 - h/2
cr.set_source_rgb(0.762, 0.762, 0.762)
cr.rectangle(x, y, w, h)
cr.fill()
cr.set_source_rgb(0, 0, 0)
cr.rectangle(x, y, w, h)
cr.stroke()
# Draw text
title = _('You finished!') + '\n'
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Serif Bold')
fd.set_size(16 * Pango.SCALE)
pango_layout.set_font_description(fd)
pango_layout.set_text(title,
len(title))
size = pango_layout.get_size()
tx = x + (w / 2) - (size[0] / Pango.SCALE) / 2
ty = y + 100
cr.move_to(tx, ty)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
report = ''
report += _('Your score was %(score)d.') % { 'score': self.score } + '\n'
if self.medal:
report += _('You earned a %(type)s medal!') % self.medal + '\n'
report += '\n'
report += _('Press the ENTER key to continue.')
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Times')
fd.set_size(12 * Pango.SCALE)
pango_layout.set_font_description(fd)
pango_layout.set_text(report, len(report))
size = pango_layout.get_size()
sx = x + w / 2 - (size[0] / Pango.SCALE) / 2
sy = y + 200
cr.move_to(sx, sy)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
def finish_game(self):
self.finished = True
# Add to the lesson history.
report = {
'lesson': self.lesson['name'],
'score': self.score,
}
self.activity.add_history(report)
# Show the medal screen, if one should be given.
got_medal = None
medals = self.lesson['medals']
for medal in medals:
if self.score >= medal['score']:
got_medal = medal['name']
if got_medal:
# Award the medal.
medal = {
'lesson': self.lesson['name'],
'type': got_medal,
'date': datetime.date.today().strftime('%B %d, %Y'),
'nick': self.activity.nick,
'score': self.score
}
self.medal = medal
# Compare this medal with any existing medals for this lesson.
# Only record the best one.
add_medal = True
if self.lesson['name'] in self.activity.data['medals']:
old_medal = self.activity.data['medals'][self.lesson['name']]
order = ' '.join([m['name'] for m in medals])
add_idx = order.index(medal['type'])
old_idx = order.index(old_medal['type'])
if add_idx < old_idx:
add_medal = False
elif add_idx == old_idx:
if medal['score'] < old_medal['score']:
add_medal = False
if add_medal:
self.activity.data['motd'] = 'newmedal'
self.activity.data['medals'][self.lesson['name']] = medal
# Refresh the main screen given the new medal.
self.activity.mainscreen.show_lesson(self.activity.mainscreen.lesson_index)
self.queue_draw()
def queue_draw_balloon(self, b):
x = int(b.x - b.size/2) - 5
y = int(b.y - b.size/2) - 5
w = int(b.size + 100)
h = int(b.size*1.5 + 10)
self.area.queue_draw_area(x, y, w, h)
def draw_balloon(self, cr, b):
x = int(b.x)
y = int(b.y)
# Draw the string.
cr.set_source_rgb(0, 0, 0)
cr.move_to(int(b.x), int(b.y + b.size / 2))
cr.line_to(int(b.x), int(b.y + b.size))
cr.stroke()
# Draw the balloon.
cr.save()
cr.set_source_rgb(b.color[0], b.color[1], b.color[2])
cr.arc(b.x, b.y, b.size / 2, 0, 2 * math.pi)
cr.fill()
cr.restore()
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Sans')
fd.set_size(12 * Pango.SCALE)
pango_layout.set_font_description(fd)
pango_layout.set_text(b.word, len(b.word))
size = pango_layout.get_size()
x = x - (size[0] / Pango.SCALE) / 2
y = y - (size[1] / Pango.SCALE) / 2
cr.move_to(x, y)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
def add_score(self, num):
self.score += num
self.queue_draw_score()
def queue_draw_score(self):
layout = self.area.create_pango_layout(_('SCORE: %d') % self.score)
layout.set_font_description(Pango.FontDescription('Times 14'))
size = layout.get_size()
x = self.bounds.width-20-size[0]/Pango.SCALE
y = 20
self.queue_draw_area(x, y, x+size[0], y+size[1])
def draw_score(self, cr):
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
fd = Pango.FontDescription('Times')
fd.set_size(14 * Pango.SCALE)
pango_layout.set_font_description(fd)
text = _('SCORE: %d') % self.score
pango_layout.set_text(text, len(text))
size = pango_layout.get_size()
x = self.bounds.width - 20 - size[0] / Pango.SCALE
y = 20
cr.move_to(x, y)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
def draw_instructions(self, cr):
# Draw instructions.
cr.set_source_rgb(0, 0, 0)
pango_layout = PangoCairo.create_layout(cr)
pango_layout.set_font_description(Pango.FontDescription('Times 14'))
text = _('Type the words to pop the balloons!')
pango_layout.set_text(text, len(text))
size = pango_layout.get_size()
x = (self.bounds.width - size[0] / Pango.SCALE) / 2
y = self.bounds.height - 20 - size[1] / Pango.SCALE
cr.move_to(x, y)
PangoCairo.update_layout(cr, pango_layout)
PangoCairo.show_layout(cr, pango_layout)
def draw(self, cr):
self.bounds = self.area.get_allocation()
# Draw background.
cr.set_source_rgb(0.915, 0.915, 1)
cr.rectangle(0, 0, self.bounds.width, self.bounds.height)
cr.fill()
# Draw the balloons.
for b in self.balloons:
self.draw_balloon(cr, b)
if self.finished:
self.draw_results(cr)
else:
self.draw_instructions(cr)
self.draw_score(cr)
def draw_cb(self, area, cr):
self.draw(cr)
| godiard/typing-turtle-activity | balloongame.py | Python | gpl-3.0 | 13,291 |
"""
Created on Thu Jan 31 2018
Unit tests for the Balance game
@author: IvanPopov
"""
import unittest
from game import Game
class GameTest(unittest.TestCase):
def test_game_loads(self):
g=Game()
self.assertEqual(g.c.title(), "Balance") | ipopov13/Balance | legacy_code/tests.py | Python | gpl-3.0 | 259 |
import os, sys, re, argparse, time, json, logging
import requests
from glob import glob
from urlparse import urlsplit
from getpass import getpass
from mastodon import Mastodon
from markdown import markdown
from html_text import extract_text
from flask import (Flask, render_template, abort,
request, redirect, jsonify)
DEBUG = False # If it ain't broke, don't debug it.
NO_TOOTING = False # Handy during debug: create gist, but don't toot.
RE_HASHTAG = re.compile(u'(?:^|(?<=\s))#(\\w+)')
RE_MENTION = re.compile(u'(?:^|(?<=\s))@(\\w+)@([\\w.]+)')
def get_hashtags(s, ignore=None):
tags = set(
['#'+tag.lower() for tag in RE_HASHTAG.findall(s)])
if ignore:
tags -= get_hashtags(ignore)
return tags
def linkify_hashtags(s, instance):
return RE_HASHTAG.sub(
lambda m:
u"[#{tag}](https://{instance}/tags/{tag})".format(
tag=m.group(1), instance=instance),
s)
def get_mentions(s, ignore=None):
mentions = set(
[u"@{}@{}".format(user,instance)
for user, instance in RE_MENTION.findall(s)])
if ignore:
mentions -= get_mentions(ignore)
return mentions
def linkify_mentions(s):
return RE_MENTION.sub(
lambda m:
u"[@{user}](https://{instance}/@{user})".format(
user=m.group(1), instance=m.group(2)),
s)
def url2toot(masto, url):
u = urlsplit(url)
if not (u.scheme=='https' and u.netloc and u.path):
return None # Don't bother the instance
res = masto.search(url, True)
res = res.get('statuses',[])
return res and res[0] or None
def make_gist(title, body):
return requests.post(
"https://api.github.com/gists",
json={
"description": title,
"public": True,
"files": {
"TOOT.md": {
"content": u"### {}\n\n{}".format(title, body)
}
}
}
).json()['html_url']+"#file-toot-md"
def post(masto, body, instance, title=None,
direction='ltr', in_reply_to=None):
# Markdown more than we need, to [hopefully] discard chopped markup.
summary = extract_text(markdown(body.strip()))[:140]
hashtags = get_hashtags(body, ignore=summary)
mentions = get_mentions(body, ignore=summary)
irt_id = in_reply_to and in_reply_to.get('id') or None
body = linkify_hashtags(linkify_mentions(body), instance)
if direction=='rtl':
body = u"""<div dir="rtl">
{}
</div>""".format(markdown(body))
if in_reply_to:
body = u"""#### In reply to [@{}]({}):
{}""".format(
in_reply_to['account']['username'],
in_reply_to['url'], body)
gist = make_gist(
title or u"A gistodon toot, {} GMT".format(
time.asctime(time.gmtime())),
body+u"""
###### Generated by [Gistodon](https://github.com/thedod/gistodon/#readme).""")
if NO_TOOTING:
return gist
status = u'{}... {}'.format(summary, gist)
if hashtags or mentions:
status += u'\n'+u' '.join(hashtags.union(mentions))
return masto.status_post(
status, spoiler_text=title, in_reply_to_id=irt_id)['url']
def webserver(masto, instance, account):
app = Flask(__name__, static_url_path='')
@app.route('/')
def index():
re = request.args.get('re','')
return render_template('index.html', account=account,
re=re)
@app.route('/toot', methods=['POST'])
def tootit():
if not request.form['markdown'].strip():
return "Nothing to toot"
in_reply_to=request.form.get('re')
if in_reply_to:
in_reply_to = url2toot(masto, in_reply_to)
if not in_reply_to:
abort(500, 'The "in reply to" url is not a toot.')
return redirect(post(
masto, request.form['markdown'], instance,
title=request.form['title'],
in_reply_to=in_reply_to,
direction=request.form['direction']))
@app.route('/re', methods=['GET', 'POST'])
def tootsearch():
return jsonify(url2toot(masto,
request.form.get('q', request.args.get('q',''))))
@app.route('/search', methods=['GET', 'POST'])
def search():
q = request.form.get(
'q', request.args.get('q','')).strip()
if not q:
return jsonify([])
res = masto.search(q, True)
return jsonify(sorted(
[
{
# This trick makes sure both local and external
# accounts get a @hostname suffix.
"value": "@{}@{}".format(
a["username"], urlsplit(a["url"]).netloc),
"title": a.get("display_name")
} for a in res.get('accounts',[])]+ \
[{"value": '#'+a} for a in res.get('hashtags',[])],
key=lambda s: s['value'].lower()))
app.run(host='localhost', port=8008, debug=DEBUG)
def main():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
parser = argparse.ArgumentParser(
description=("Toot stdin as a gist [markdown is supported],"
" or launch a localhost web interface."))
parser.add_argument('-i', '--instance',
help='Your mastodon instance (e.g. mastodon.social).')
parser.add_argument('-e', '--email',
help='The email address you login to that instance with.')
parser.add_argument('-a', '--app_name', default='Gistodon',
help=('Name for the app (default is Gistodon).'
' Appears below the toot, near the date.'))
parser.add_argument('-w', '--web', action="store_true",
help=("Run as a web server on localhost"
" (toot-specific --title, --re, and --rtl"
" are ignored)."))
parser.add_argument('-t', '--title',
help="Optional: gist's title, and the toot's content warning (CW).")
parser.add_argument('-r', '--re',
help="Optional: url of the toot you're replying to.")
parser.add_argument('--rtl', dest='direction', action='store_const',
const='rtl', default='ltr',
help=("Format the gist as right-to-left text."))
args = parser.parse_args()
instance = args.instance
if instance:
client_cred_filename = '{}.{}.client.secret'.format(args.app_name, args.instance)
else:
candidates = glob('{}.*.client.secret'.format(args.app_name))
assert candidates, "No app/user registered. Please run register.sh first."
client_cred_filename = candidates[0]
instance = client_cred_filename[len(args.app_name)+1:-len('.client.secret')]
email = args.email
if email:
user_cred_filename = '{}.{}.{}.user.secret'.format(
args.app_name, instance, email.replace('@','.'))
else:
candidates = glob('{}.{}.*.user.secret'.format(
args.app_name, instance))
assert len(candidates), \
"No user registered for {} at {}. Please run register.sh first.".format(
args.app_name, instance)
user_cred_filename = candidates[0]
assert \
os.path.exists(client_cred_filename) and \
os.path.exists(user_cred_filename), \
"App/user not registered. Please run register.sh"
logging.info("Connecting to {}...".format(instance))
masto = Mastodon(
client_id = client_cred_filename,
access_token = user_cred_filename,
api_base_url = 'https://'+instance)
if args.web:
account = masto.account_verify_credentials()
webserver(masto, instance, account)
else:
logging.info("Reading markdown from standard input...")
lines = [unicode(l,'utf-8') for l in sys.stdin.readlines()]
assert len(filter(lambda l: l.strip(), lines)), \
"Empty toot."
body = u'\n'.join(lines)
assert not args.title or len(args.title)<=80, "Title exceeds 80 characters"
if args.re:
irt = url2toot(masto, args.re)
assert irt, "not a toot's url: {}".format(args.re)
else:
irt = None
title = args.title
try:
title = unicode(title,'utf-8')
except TypeError:
pass # Either Null, or already unicode(?!?)
logging.info("Posted {}.".format(post(
masto, body, instance,
title=title, direction=args.direction, in_reply_to=irt)))
if __name__=='__main__':
main()
| thedod/gistodon | gistodon.py | Python | gpl-3.0 | 8,763 |
import time
import os
import json
import requests
# Plan is to import and to checkout dependencies:
| pi-bot/pibot-pkg | tests/dependencies.py | Python | gpl-3.0 | 100 |
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database_repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO)) | CCS-Tech/duck-blocks | db_create.py | Python | gpl-3.0 | 475 |
from troposphere import Tags,FindInMap, Ref, Template, Parameter,ImportValue, Ref, Output
from troposphere.efs import FileSystem, MountTarget
from troposphere.ec2 import SecurityGroup, SecurityGroupRule, Instance, Subnet
from create import export_ref, import_ref
from create.network import AclFactory, assoc_nacl_subnet
def efs_setup(template, ops, app_cfn_options, stack_name, stack_setup):
# Variable Declarations
vpc_id=ops.get('vpc_id')
efs_sg = app_cfn_options.network_names['tcpstacks'][stack_name]['sg_name']
efs_acl = app_cfn_options.network_names['tcpstacks'][stack_name]['nacl_name']
# Create EFS FIleSystem
efs_fs=FileSystem(
title='{}{}'.format(ops.app_name, stack_name),
FileSystemTags=Tags(Name='{}-{}'.format(ops.app_name, stack_name))
)
template.add_resource(efs_fs)
export_ref(template, '{}{}{}'.format(ops.app_name,stack_name,"Endpoint"), value=Ref(efs_fs), desc="Endpoint for EFS FileSystem")
# EFS FS Security Groups
efs_security_group=SecurityGroup(
title=efs_sg,
GroupDescription='Allow Access',
VpcId=vpc_id,
Tags=Tags(Name=efs_sg)
)
template.add_resource(efs_security_group)
export_ref(template, efs_sg, value=Ref(efs_sg), desc="Export for EFS Security Group")
# Create Network ACL for EFS Stack
efs_nacl = AclFactory(
template,
name=efs_acl,
vpc_id=ops.vpc_id,
in_networks=[val for key, val in sorted(ops.app_networks.items())],
in_ports=stack_setup['ports'],
out_ports=ops.out_ports,
out_networks=[val for key, val in sorted(ops.app_networks.items())],
ssh_hosts=ops.get("deploy_hosts"),
)
export_ref(
template,
export_name=efs_acl,
value=Ref(efs_acl),
desc="{}{} stack".format("NetACL for", stack_name)
)
# Create Subnets for Mount Targets
for k, v in ops['tcpstacks']['EFS']['networks'].items():
efs_subnet=Subnet(
title='{}{}{}{}'.format(ops.app_name, stack_name, "MountTargetSubnet", k.split("-")[-1]),
AvailabilityZone=k,
CidrBlock=v,
VpcId=vpc_id,
Tags=Tags(Name='{}-{}-{}-{}'.format(ops.app_name, stack_name, "MountTargetSubnet", k.split("-")[-1]))
)
template.add_resource(efs_subnet)
assoc_name = '{}{}{}'.format(stack_name,"AclAssoc",k.split("-")[-1])
assoc_nacl_subnet(template, assoc_name, Ref(efs_acl), Ref(efs_subnet))
efs_mount_target=MountTarget(
title='{}{}{}'.format(ops.app_name, "EFSMountTarget", k.split("-")[-1]),
FileSystemId=Ref(efs_fs),
SecurityGroups=[Ref(efs_security_group)],
SubnetId=Ref(efs_subnet)
)
template.add_resource(efs_mount_target)
| gotropo/gotropo | create/efs.py | Python | gpl-3.0 | 2,807 |
# Snap! Configuration Manager
#
# (C) Copyright 2011 Mo Morsi ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, Version 3,
# as published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os
import os.path
import optparse, ConfigParser
import snap
from snap.options import *
from snap.snapshottarget import SnapshotTarget
from snap.exceptions import ArgError
class ConfigOptions:
"""Container holding all the configuration options available
to the Snap system"""
# modes of operation
RESTORE = 0
BACKUP = 1
def __init__(self):
'''initialize configuration'''
# mode of operation
self.mode = None
# mapping of targets to lists of backends to use when backing up / restoring them
self.target_backends = {}
# mapping of targets to lists of entities to include when backing up
self.target_includes = {}
# mapping of targets to lists of entities to exclude when backing up
self.target_excludes = {}
# output log level
# currently supports 'quiet', 'normal', 'verbose', 'debug'
self.log_level = 'normal'
# output format to backup / restore
self.outputformat = 'snapfile'
# location of the snapfile to backup to / restore from
self.snapfile = None
# Encryption/decryption password to use, if left as None, encryption will be disabled
self.encryption_password = None
# hash of key/value pairs of service-specific options
self.service_options = {}
for backend in SnapshotTarget.BACKENDS:
self.target_backends[backend] = False
self.target_includes[backend] = []
self.target_excludes[backend] = []
def log_level_at_least(self, comparison):
return (comparison == 'quiet') or \
(comparison == 'normal' and self.log_level != 'quiet') or \
(comparison == 'verbose' and (self.log_level == 'verbose' or self.log_level == 'debug')) or \
(comparison == 'debug' and self.log_level == 'debug')
class ConfigFile:
"""Represents the snap config file to be read and parsed"""
parser = None
def __init__(self, config_file):
'''
Initialize the config file, specifying its path
@param file - the path to the file to load
'''
# if config file doesn't exist, just ignore
if not os.path.exists(config_file):
if snap.config.options.log_level_at_least("verbose"):
snap.callback.snapcallback.warn("Config file " + config_file + " not found")
else:
self.parser = ConfigParser.ConfigParser()
self.parser.read(config_file)
self.__parse()
def string_to_bool(string):
'''Static helper to convert a string to a boolean value'''
if string == 'True' or string == 'true' or string == '1':
return True
elif string == 'False' or string == 'false' or string == '0':
return False
return None
string_to_bool = staticmethod(string_to_bool)
def string_to_array(string):
'''Static helper to convert a colon deliminated string to an array of strings'''
return string.split(':')
string_to_array = staticmethod(string_to_array)
def __get_bool(self, key, section='main'):
'''
Retreive the indicated boolean value from the config file
@param key - the string key corresponding to the boolean value to retrieve
@param section - the section to retrieve the value from
@returns - the value or False if not found
'''
try:
return ConfigFile.string_to_bool(self.parser.get(section, key))
except:
return None
def __get_string(self, key, section='main'):
'''
Retreive the indicated string value from the config file
@param key - the string key corresponding to the string value to retrieve
@param section - the section to retrieve the value from
@returns - the value or None if not found
'''
try:
return self.parser.get(section, key)
except:
return None
def __get_array(self, section='main'):
'''return array of key/value pairs from the config file section
@param section - the section which to retrieve the key / values from
@returns - the array of key / value pairs or None if not found
'''
try:
return self.parser.items(section)
except:
return None
def __parse(self):
'''parse configuration out of the config file'''
for backend in SnapshotTarget.BACKENDS:
val = self.__get_bool(backend)
if val is not None:
snap.config.options.target_backends[backend] = val
else:
val = self.__get_string(backend)
if val:
snap.config.options.target_backends[backend] = True
val = ConfigFile.string_to_array(val)
for include in val:
if include[0] == '!':
snap.config.options.target_excludes[backend].append(include[1:])
else:
snap.config.options.target_includes[backend].append(include)
else:
val = self.__get_bool('no' + backend)
if val:
snap.config.options.target_backends[backend] = False
of = self.__get_string('outputformat')
sf = self.__get_string('snapfile')
ll = self.__get_string('loglevel')
enp = self.__get_string('encryption_password')
if of != None:
snap.config.options.outputformat = of
if sf != None:
snap.config.options.snapfile = sf
if ll != None:
snap.config.options.log_level = ll
if enp != None:
snap.config.options.encryption_password = enp
services = self.__get_array('services')
if services:
for k, v in services:
snap.config.options.service_options[k] = v
class Config:
"""The configuration manager, used to set and verify snap config values
from the config file and command line. Primary interface to the
Configuration System"""
configoptions = None
parser = None
# read values from the config files and set them in the target ConfigOptions
def read_config(self):
# add conf stored in resources if running from local checkout
CONFIG_FILES.append(os.path.join(os.path.dirname(__file__), "..", "resources", "snap.conf"))
for config_file in CONFIG_FILES:
ConfigFile(config_file)
def parse_cli(self):
'''
parses the command line an set them in the target ConfigOptions
'''
usage = "usage: %prog [options] arg"
self.parser = optparse.OptionParser(usage, version=SNAP_VERSION)
self.parser.add_option('', '--restore', dest='restore', action='store_true', default=False, help='Restore snapshot')
self.parser.add_option('', '--backup', dest='backup', action='store_true', default=False, help='Take snapshot')
self.parser.add_option('-l', '--log-level', dest='log_level', action='store', default="normal", help='Log level (quiet, normal, verbose, debug)')
self.parser.add_option('-o', '--outputformat', dest='outputformat', action='store', default=None, help='Output file format')
self.parser.add_option('-f', '--snapfile', dest='snapfile', action='store', default=None, help='Snapshot file, use - for stdout')
self.parser.add_option('-p', '--password', dest='encryption_password', action='store', default=None, help='Snapshot File Encryption/Decryption Password')
# FIXME how to permit parameter lists for some of these
for backend in SnapshotTarget.BACKENDS:
self.parser.add_option('', '--' + backend, dest=backend, action='store_true', help='Enable ' + backend + ' snapshots/restoration')
self.parser.add_option('', '--no' + backend, dest=backend, action='store_false', help='Disable ' + backend + ' snapshots/restoration')
(options, args) = self.parser.parse_args()
if options.restore != False:
snap.config.options.mode = ConfigOptions.RESTORE
if options.backup != False:
snap.config.options.mode = ConfigOptions.BACKUP
if options.log_level:
snap.config.options.log_level = options.log_level
if options.outputformat != None:
snap.config.options.outputformat = options.outputformat
if options.snapfile != None:
snap.config.options.snapfile = options.snapfile
if options.encryption_password != None:
snap.config.options.encryption_password = options.encryption_password
for backend in SnapshotTarget.BACKENDS:
val = getattr(options, backend)
if val != None:
if type(val) == str:
snap.config.options.target_backends[backend] = True
val = ConfigFile.string_to_array(val)
for include in val:
if include[0] == '!':
snap.config.options.target_excludes[backend].append(include[1:])
else:
snap.config.options.target_includes[backend].append(include)
else:
snap.config.options.target_backends[backend] = val
def verify_integrity(self):
'''
verify the integrity of the current option set
@raises - ArgError if the options are invalid
'''
if snap.config.options.mode == None: # mode not specified
raise snap.exceptions.ArgError("Must specify backup or restore")
if snap.config.options.snapfile == None: # need to specify snapfile location
raise snap.exceptions.ArgError("Must specify snapfile")
# TODO verify output format is one of permitted types
if snap.config.options.outputformat == None: # need to specify output format
raise snap.exceptions.ArgError("Must specify valid output format")
# static shared options
options = ConfigOptions()
| movitto/snap | snap/config.py | Python | gpl-3.0 | 10,759 |
# Peerz - P2P python library using ZeroMQ sockets and gevent
# Copyright (C) 2014-2015 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from transitions import Machine
class MessageState(object):
states = ['initialised', 'waiting response', 'complete', 'timedout']
transitions = [
{'trigger': 'query', 'source': 'initialised', 'dest': 'waiting response', 'before': '_update', 'after': '_send_query'},
{'trigger': 'response', 'source': 'waiting response', 'dest': 'complete', 'before': '_update', 'after': '_completed'},
{'trigger': 'timeout', 'source': '*', 'dest': 'timedout', 'before': '_update', 'after': '_completed', },
]
def __init__(self, engine, txid, msg, callback=None, max_duration=5000, max_concurrency=3):
self.engine = engine
self.callback = callback
self.machine = Machine(model=self,
states=self.states,
transitions=self.transitions,
initial='initialised')
self.start = self.last_change = time.time() * 1000
self.max_duration = max_duration
self.max_concurrency = max_concurrency
self.txid = txid
self.times = {}
self.parse_message(msg)
self.query()
def query(self):
pass
def parse_message(self, msg):
self.val = msg.pop(0)
def is_complete(self):
return self.state in ['complete', 'timedout']
def pack_request(self):
return None
@staticmethod
def unpack_response(content):
return None
@staticmethod
def pack_response(content):
return None
def _update(self):
now = time.time() * 1000
self.times.setdefault(self.state, 0.0)
self.times[self.state] += (now - self.last_change)
self.last_change = now
def duration(self):
return time.time() * 1000 - self.start
def latency(self):
return self.times.setdefault('waiting response', 0.0)
def _send_query(self):
pass
def _completed(self):
pass
| shendo/peerz | peerz/messaging/base.py | Python | gpl-3.0 | 2,720 |
from essence3.util import clamp
class Align(object):
def __init__(self, h, v = None):
self.h = h
self.v = h if v is None else v
def __call__(self, node, edge):
if edge in ('top', 'bottom'):
return node.width * self.h
if edge in ('left', 'right'):
return node.height * self.v
class FlowAlign(object):
def __init__(self, h, v = None):
self.h = h
self.v = h if v is None else v
def __call__(self, node, edge):
if edge in ('top', 'bottom'):
return node.flowline(edge, self.h)
if edge in ('left', 'right'):
return node.flowline(edge, self.v)
def flow_simple(node, (low, high), edge, which):
if which == 0:
return low + node.offset1[0] + node[0].flowline(edge, which)
if which == 2:
return low + node.offset1[-2] + node[-1].flowline(edge, which)
i = len(node) / 2
if which == 1:
if len(node) % 2 == 1:
return low + node.offset1[i] + node[i].flowline(edge, which)
else:
return low + (node.offset0[i] + node.offset1[i])*0.5
class Box(object):
def __init__(self, (left, top, width, height), style):
self.left = left
self.top = top
self.width = width
self.height = height
self.style = style
def flowline(self, edge, which):
if edge in ('top', 'bottom'):
return self.width * (0.0, 0.5, 1.0)[which]
if edge in ('left', 'right'):
return self.height * (0.0, 0.5, 1.0)[which]
def measure(self, parent):
pass
def arrange(self, parent, (left,top)):
self.left = left
self.top = top
def render(self):
background = self.style['background']
if background:
background(self)
def pick(self, (x,y), hits):
return hits
def subintrons(self, res):
return res
def traverse(self, res, cond):
if cond(self):
res.append(self)
return res
class Slate(Box):
def __init__(self, (width, height), style):
Box.__init__(self, (0, 0, width, height), style)
class Label(Box):
def __init__(self, source, style):
self.source = source
Box.__init__(self, (0, 0, 0, 0), style)
self.offsets = None
def flowline(self, edge, which):
left, top, right, bottom = self.style['padding']
if edge in ('top', 'bottom'):
return self.width * (0.0, 0.5, 1.0)[which] + left
if edge in ('left', 'right'):
if which == 0:
return top
if which == 1:
return top + self.style['font'].mathline * self.style['font_size']
if which == 2:
return top + self.style['font'].baseline * self.style['font_size']
def measure(self, parent):
left, top, right, bottom = self.style['padding']
self.offsets = self.style['font'].measure(self.source, self.style['font_size'])
self.width = left + right + self.offsets[-1]
self.height = top + bottom + self.style['font'].lineheight * self.style['font_size']
def arrange(self, parent, (left,top)):
self.left = left
self.top = top
def render(self):
background = self.style['background']
if background:
background(self)
self.style['font'](self)
def selection_rect(self, start, stop):
left, top, right, bottom = self.style['padding']
x0 = self.offsets[start]
x1 = self.offsets[stop]
return (self.left + left + x0 - 1, self.top, x1-x0 + 2, self.height)
def scan_offset(self, (x,y)):
left, top, right, bottom = self.style['padding']
x -= self.left + left
k = 0
best = abs(x - 0)
for index, offset in enumerate(self.offsets):
v = abs(x - offset)
if v <= best:
best = v
k = index
return k, best ** 2.0 + abs(y - clamp(self.top, self.top + self.height, y)) ** 2.0
class Container(Box):
def __init__(self, nodes, style):
self.nodes = nodes
self.offset0 = [0] * (len(nodes) + 1)
self.offset1 = [0] * (len(nodes) + 1)
self.flow0 = [0] * len(nodes)
self.flow1 = [0] * len(nodes)
self.base0 = 0
self.base1 = 0
Box.__init__(self, (0, 0, 0, 0), style)
def __getitem__(self, i):
return self.nodes[i]
def __iter__(self):
return iter(self.nodes)
def __len__(self):
return len(self.nodes)
def render(self):
background = self.style['background']
if background:
background(self)
for node in self:
node.render()
def pick(self, (x,y), hits):
for node in self:
res = node.pick((x,y), hits)
return hits
def subintrons(self, res):
for node in self:
res = node.subintrons(res)
return res
def traverse(self, res, cond):
if cond(self):
res.append(self)
for node in self:
res = node.traverse(res, cond)
return res
class HBox(Container):
def flowline(self, edge, which):
left, top, right, bottom = self.style['padding']
if edge == 'left':
return top + self.base0 - self.flow0[0] + self[0].flowline(edge, which)
elif edge == 'right':
return top + self.base1 - self.flow1[-1] + self[-1].flowline(edge, which)
else:
return self.style['flow'](self, (left, self.width-right), edge, which)
def measure(self, parent):
offset = cap = 0
low = org = high = 0
for i, node in enumerate(self):
node.measure(self)
self.offset0[i] = cap
self.offset1[i] = offset
self.flow0[i] = f0 = self.style['align'](node, 'left')
self.flow1[i] = f1 = self.style['align'](node, 'right')
low = min(low, 0 - f0)
high = max(high, node.height - f0)
low += f0 - f1
org += f0 - f1
high += f0 - f1
cap = offset + node.width
offset += node.width + self.style['spacing']
self.offset0[len(self)] = self.offset1[len(self)] = cap
self.base0 = org - low
self.base1 = 0 - low
left, top, right, bottom = self.style['padding']
self.width = cap + left + right
self.height = high - low + top + bottom
def arrange(self, parent, (left,top)):
self.left = left
self.top = top
left, top, right, bottom = self.style['padding']
base_x = self.left + left
base_y = self.base0 + self.top + top
for i, node in enumerate(self):
node.arrange(self, (base_x + self.offset1[i], base_y - self.flow0[i]))
base_y += self.flow1[i] - self.flow0[i]
def get_spacer(self, i):
left, top, right, bottom = self.style['padding']
x0 = self.offset0[i]
x1 = self.offset1[i]
return self.left + left+x0, self.top + top, x1-x0, self.height-bottom-top
class VBox(Container):
def flowline(self, edge, which):
left, top, right, bottom = self.style['padding']
if edge == 'top':
return left + self.base0 - self.flow0[0] + self[0].flowline(edge, which)
elif edge == 'bottom':
return left + self.base1 - self.flow1[-1] + self[-1].flowline(edge, which)
else:
return self.style['flow'](self, (top, self.height-bottom), edge, which)
def measure(self, parent):
offset = cap = 0
low = org = high = 0
for i, node in enumerate(self):
node.measure(self)
self.offset0[i] = cap
self.offset1[i] = offset
self.flow0[i] = f0 = self.style['align'](node, 'top')
self.flow1[i] = f1 = self.style['align'](node, 'bottom')
low = min(low, 0 - f0)
high = max(high, node.width - f0)
low += f0 - f1
org += f0 - f1
high += f0 - f1
cap = offset + node.height
offset += node.height + self.style['spacing']
self.offset0[len(self)] = self.offset1[len(self)] = cap
self.base0 = org - low
self.base1 = 0 - low
left, top, right, bottom = self.style['padding']
self.height = cap + top + bottom
self.width = high - low + left + right
def arrange(self, parent, (left,top)):
self.left = left
self.top = top
left, top, right, bottom = self.style['padding']
base_x = self.base0 + self.left + left
base_y = self.top + top
for i, node in enumerate(self):
node.arrange(self, (base_x - self.flow0[i], base_y + self.offset1[i]))
base_x += self.flow1[i] - self.flow0[i]
def get_spacer(self, i):
left, top, right, bottom = self.style['padding']
y0 = self.offset0[i]
y1 = self.offset1[i]
return self.left + left, self.top + y0+top, self.width - right-left, y1-y0
class Intron(Box):
def __init__(self, source, index, generator):
self.source = source
self.index = index
self.generator = generator
self.rebuild()
def rebuild(self):
self.node, self.style = self.generator(self.source)
def flowline(self, edge, which):
left, top, right, bottom = self.style['padding']
if edge in ('left', 'right'):
x0 = top
if edge in ('top', 'bottom'):
x0 = left
return x0 + self.node.flowline(edge, which)
def measure(self, parent):
left, top, right, bottom = self.style['padding']
min_width = self.style['min_width']
min_height = self.style['min_height']
self.node.measure(self)
self.width = max(min_width, self.node.width + left + right)
self.height = max(min_height, self.node.height + top + bottom)
def arrange(self, parent, (left, top)):
self.left = left
self.top = top
left, top, right, bottom = self.style['padding']
inner_width = self.width - left - right
inner_height = self.height - top - bottom
x = self.left + left + (inner_width - self.node.width)*0.5
y = self.top + top + (inner_height - self.node.height)*0.5
self.node.arrange(self, (x,y))
def render(self):
background = self.style['background']
if background:
background(self)
self.node.render()
def pick(self, (x,y), hits=None):
if hits == None:
hits = []
if 0 <= x - self.left < self.width and 0 <= y - self.top < self.height:
hits.append(self)
return self.node.pick((x,y), hits)
def subintrons(self, res=None):
if res == None:
return self.node.subintrons([])
else:
res.append(self)
return res
def find_context(self, intron):
if intron == self:
return ()
for subintron in self.subintrons():
match = subintron.find_context(intron)
if match is not None:
return (self,) + match
def traverse(self, res, cond):
if cond(self):
res.append(self)
return self.node.traverse(res, cond)
def scan_offset(self, (x,y)):
left = self.left
right = self.left + self.width
top = self.top
bottom = self.top + self.height
b0 = (x - left)**2 + (y - top)**2
b1 = (x - right)**2 + (y - bottom)**2
b = (x - clamp(left, right, x))**2 + (y - clamp(top, bottom, y))**2
if b0 < b1:
return self.index, b
else:
return self.index+1, b
def solve(root, (left, top)):
root.measure(None)
root.arrange(None, (left, top))
| cheery/essence | essence3/layout.py | Python | gpl-3.0 | 11,955 |
# Copyright (C) 2018-2019 Matthias Klumpp <[email protected]>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging as log
from glob import glob
from laniakea import LkModule
from laniakea.dud import Dud
from laniakea.utils import get_dir_shorthand_for_uuid, random_string
from laniakea.db import session_scope, Job, JobResult, JobKind, SourcePackage
from laniakea.msgstream import EventEmitter
from .rubiconfig import RubiConfig
from .utils import safe_rename
def accept_upload(conf, dud, event_emitter):
'''
Accept the upload and move its data to the right places.
'''
job_success = dud.get('X-Spark-Success') == 'Yes'
job_id = dud.get('X-Spark-Job')
# mark job as accepted and done
with session_scope() as session:
job = session.query(Job).filter(Job.uuid == job_id).one_or_none()
if not job:
log.error('Unable to mark job \'{}\' as done: The Job was not found.'.format(job_id))
# this is a weird situation, there is no proper way to handle it as this indicates a bug
# in the Laniakea setup or some other oddity.
# The least harmful thing to do is to just leave the upload alone and try again later.
return
job.result = JobResult.SUCCESS if job_success else JobResult.FAILURE
job.latest_log_excerpt = None
# move the log file and Firehose reports to the log storage
log_target_dir = os.path.join(conf.log_storage_dir, get_dir_shorthand_for_uuid(job_id))
firehose_target_dir = os.path.join(log_target_dir, 'firehose')
for fname in dud.get_files():
if fname.endswith('.log'):
os.makedirs(log_target_dir, exist_ok=True)
# move the logfile to its destination and ensure it is named correctly
target_fname = os.path.join(log_target_dir, job_id + '.log')
safe_rename(fname, target_fname)
elif fname.endswith('.firehose.xml'):
os.makedirs(firehose_target_dir, exist_ok=True)
# move the firehose report to its own directory and rename it
fh_target_fname = os.path.join(firehose_target_dir, job_id + '.firehose.xml')
safe_rename(fname, fh_target_fname)
# handle different job data
if job.module == LkModule.ISOTOPE:
from .import_isotope import handle_isotope_upload
handle_isotope_upload(session,
success=job_success,
conf=conf,
dud=dud,
job=job,
event_emitter=event_emitter)
elif job.kind == JobKind.PACKAGE_BUILD:
# the package has been imported by Dak, so we just announce this
# event to the world
spkg = session.query(SourcePackage) \
.filter(SourcePackage.source_uuid == job.trigger) \
.filter(SourcePackage.version == job.version) \
.one_or_none()
if spkg:
suite_target_name = '?'
if job.data:
suite_target_name = job.data.get('suite', '?')
event_data = {'pkgname': spkg.name,
'version': job.version,
'architecture': job.architecture,
'suite': suite_target_name,
'job_id': job_id}
if job_success:
event_emitter.submit_event_for_mod(LkModule.ARCHIVE, 'package-build-success', event_data)
else:
event_emitter.submit_event_for_mod(LkModule.ARCHIVE, 'package-build-failed', event_data)
else:
event_emitter.submit_event('upload-accepted', {'job_id': job_id, 'job_failed': not job_success})
# remove the upload description file from incoming
os.remove(dud.get_dud_file())
log.info("Upload {} accepted.", dud.get_filename())
def reject_upload(conf, dud, reason='Unknown', event_emitter=None):
'''
If a file has issues, we reject it and put it into the rejected queue.
'''
os.makedirs(conf.rejected_dir, exist_ok=True)
# move the files referenced by the .dud file
random_suffix = random_string(4)
for fname in dud.get_files():
target_fname = os.path.join(conf.rejected_dir, os.path.basename(fname))
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
# move the file to the rejected dir
safe_rename(fname, target_fname)
# move the .dud file itself
target_fname = os.path.join(conf.rejected_dir, dud.get_filename())
if os.path.isfile(target_fname):
target_fname = target_fname + '+' + random_suffix
safe_rename(dud.get_dud_file(), target_fname)
# also store the reject reason for future reference
with open(target_fname + '.reason', 'w') as f:
f.write(reason + '\n')
log.info('Upload {} rejected.', dud.get_filename())
if event_emitter:
event_emitter.submit_event('upload-rejected', {'dud_filename': dud.get_filename(), 'reason': reason})
def import_files_from(conf, incoming_dir):
'''
Import files from an untrusted incoming source.
IMPORTANT: We assume that the uploader can not edit their files post-upload.
If they could, we would be vulnerable to timing attacks here.
'''
emitter = EventEmitter(LkModule.RUBICON)
for dud_file in glob(os.path.join(incoming_dir, '*.dud')):
dud = Dud(dud_file)
try:
dud.validate(keyrings=conf.trusted_gpg_keyrings)
except Exception as e:
reason = 'Signature validation failed: {}'.format(str(e))
reject_upload(conf, dud, reason, emitter)
continue
# if we are here, the file is good to go
accept_upload(conf, dud, emitter)
def import_files(options):
conf = RubiConfig()
if not options.incoming_dir:
print('No incoming directory set. Can not process any files.')
sys.exit(1)
import_files_from(conf, options.incoming_dir)
| lkorigin/laniakea | src/rubicon/rubicon/fileimport.py | Python | gpl-3.0 | 6,906 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-30 01:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0008_contactphone_place_on_header'),
]
operations = [
migrations.AddField(
model_name='contactemail',
name='place_on_header',
field=models.BooleanField(default=False, verbose_name='Размещать в заголовке'),
),
migrations.AddField(
model_name='contactperson',
name='photo',
field=models.ImageField(blank=True, null=True, upload_to='', verbose_name='Фото'),
),
]
| andrius-momzyakov/grade | web/migrations/0009_auto_20170730_0156.py | Python | gpl-3.0 | 735 |
# Authors: John Dennis <[email protected]>
#
# Copyright (C) 2011 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Quick Start Guide For Using This Module
=======================================
This module implements a Log Manager class which wraps the Python
logging module and provides some utility functions for use with
logging. All logging operations should be done through the
`LogManager` where available. *DO NOT create objects using the
Python logging module, the log manager will be unaware of them.*
This module was designed for ease of use while preserving advanced
functionality and performance. You must perform the following steps.
1. Import the log_manger module and instantiate *one* `LogManager`
instance for your application or library. The `LogManager` is
configured via `LogManager.configure()` whose values are
easily populated from command line options or a config file. You
can modify the configuration again at any point.
2. Create one or more output handlers via
`LogManager.create_log_handlers()` an easy to use yet powerful
interface.
3. In your code create loggers via `LogManager.get_logger()`. Since
loggers are normally bound to a class this method is optimized for
that case, all you need to do in the call ``__init__()`` is::
log_mgr.get_logger(self, True)
Then emitting messages is as simple as ``self.debug()`` or ``self.error()``
Example:
--------
::
# Step 1, Create log manager and configure it
prog_name = 'my_app'
log_mgr = LogManager(prog_name)
log_mgr.configure(dict(verbose=True))
# Step 2, Create handlers
log_mgr.create_log_handlers([dict(name='my_app stdout',
stream=sys.stdout,
level=logging.INFO),
dict(name='my_app file',
filename='my_app.log',
level=logging.DEBUG)])
# Step 3, Create and use a logger in your code
class FooBar:
def __init__(self, name):
log_mgr.get_logger(self, True)
self.info("I'm alive! %s", name)
foobar = FooBar('Dr. Frankenstein')
# Dump the log manager state for illustration
print
print log_mgr
Running the above code would produce::
<INFO>: I'm alive! Dr. Frankenstein
root_logger_name: my_app
configure_state: None
default_level: INFO
debug: False
verbose: True
number of loggers: 2
"my_app" [level=INFO]
"my_app.__main__.FooBar" [level=INFO]
number of handlers: 2
"my_app file" [level=DEBUG]
"my_app stdout" [level=INFO]
number of logger regexps: 0
*Note, Steps 1 & 2 were broken out for expository purposes.* You can
pass your handler configuration into `LogManager.configure()`. The above
could have been simpler and more compact.::
# Step 1 & 2, Create log manager, and configure it and handlers
prog_name = 'my_app'
log_mgr = LogManager(prog_name)
log_mgr.configure(dict(verbose=True,
handlers = [dict(name='my_app stdout',
stream=sys.stdout,
level=logging.INFO),
dict(name='my_app file',
filename='my_app.log',
level=logging.DEBUG)])
FAQ (Frequently Asked Questions)
================================
#. **Why is this better than logging.basicConfig? The short example
for the LogManager doesn't seem much different in complexity from
basicConfig?**
* You get independent logging namespaces. You can instantiate
multiple logging namespaces. If you use this module you'll be
isolated from other users of the Python logging module avoiding
conflicts.
* Creating and initializing loggers for classes is trivial. One
simple call creates the logger, configures it, and sets logging
methods on the class instance.
* You can easily configure individual loggers to different
levels. For example turn on debuging for just the part of the
code you're working on.
* The configuration is both simple and powerful. You get many more
options than with basicConfig.
* You can dynamically reset the logging configuration during
execution, you're not forced to live with the config established
during program initialization.
* The manager optimizes the use of the logging objects, you'll
spend less time executing pointless logging code for messages
that won't be emitted.
* You can see the state of all the logging objects in your
namespace from one centrally managed location.
* You can configure a LogManager to use the standard logging root
logger and get all the benefits of this API.
#. **How do I turn on debug logging for a specific class without
affecting the rest of the logging configuration?**
Use a logger regular expression to bind a custom level to loggers
whose name matches the regexp. See `LogManager.configure()`
for details.
Lets say you want to set your Foo.Bar class to debug, then do
this::
log_mgr.configure(dict(logger_regexps=[(r'Foo\.Bar', 'debug')]))
#. **I set the default_level but all my loggers are configured
with a higher level, what happened?**
You probably don't have any handlers defined at or below the
default_level. The level set on a logger will never be
lower than the lowest level handler available to that logger.
#. **My logger's all have their level set to a huge integer, why?**
See above. Logger's will never have a level less than the level of
the handlers visible to the logger. If there are no handlers then
loggers can't output anything so their level is set to maxsize.
#. **I set the default_level but all the loggers are configured
at INFO or DEBUG, what happened?**
The verbose and debug config flags set the default_level to
INFO and DEBUG respectively as a convenience.
#. **I'm not seeing messages output when I expect them to be, what's
wrong?**
For a message to be emitted the following 3 conditions must hold:
* Message level >= logger's level
* Message level >= handler's level
* The message was not elided by a filter
To verify the above conditions hold print out the log manager state
(e.g. print log_mgr). Locate your logger, what level is at? Locate
the handler you expected to see the message appear on, what level
is it?
A General Discussion of Python Logging
======================================
The design of this module is driven by how the Python logging module
works. The following discussion complements the Python Logging Howto,
fills in some missing information and covers strategies for
implementing different functionality along with the trade-offs
involved.
Understanding when & how log messages are emitted:
--------------------------------------------------
Loggers provide the application interface for logging. Every logger
object has the following methods debug(), info(), warning(), error(),
critical(), exception() and log() all of which can accept a format
string and arguments. Applications generate logging messages by
calling one of these methods to produce a formatted message.
A logger's effective level is the first explicitly set level found
when searching from the logger through it's ancestors terminating at
the root logger. The root logger always has an explicit level
(defaults to WARNING).
For a message to be emitted by a handler the following must be true:
The logger's effective level must >= message level and it must not
be filtered by a filter attached to the logger, otherwise the
message is discarded.
If the message survives the logger check it is passed to a list of
handlers. A handler will emit the message if the handler's level >=
message level and its not filtered by a filter attached to the
handler.
The list of handlers is determined thusly: Each logger has a list of
handlers (which may be empty). Starting with the logger the message
was bound to the message is passed to each of it's handlers. Then
the process repeats itself by traversing the chain of loggers
through all of it's ancestors until it reaches the root logger. The
logger traversal will be terminated if the propagate flag on a logger
is False (by default propagate is True).
Let's look at a hypothetical logger hierarchy (tree)::
A
/ \\
B D
/
C
There are 4 loggers and 3 handlers
Loggers:
+-------+---------+---------+-----------+----------+
|Logger | Level | Filters | Propagate | Handlers |
+=======+=========+=========+===========+==========+
| A | WARNING | [] | False | [h1,h2] |
+-------+---------+---------+-----------+----------+
| A.B | ERROR | [] | False | [h3] |
+-------+---------+---------+-----------+----------+
| A.B.C | DEBUG | [] | True | |
+-------+---------+---------+-----------+----------+
| A.D | | [] | True | |
+-------+---------+---------+-----------+----------+
Handlers:
+---------+---------+---------+
| Handler | Level | Filters |
+=========+=========+=========+
| h1 | ERROR | [] |
+---------+---------+---------+
| h2 | WARNING | [] |
+---------+---------+---------+
| h3 | DEBUG | [] |
+---------+---------+---------+
Each of the loggers and handlers have empty filter lists in this
example thus the filter checks will always pass.
If a debug message is posted logger A.B.C the following would
happen. The effective level is determined. Since it does not have a
level set it's parent (A.B) is examined which has ERROR set,
therefore the effective level of A.B.C is ERROR. Processing
immediately stops because the logger's level of ERROR does not
permit debug messages.
If an error message is posted on logger A.B.C it passes the logger
level check and filter check therefore the message is passed along
to the handlers. The list of handlers on A.B.C is empty so no
handlers are called at this position in the logging hierarchy. Logger
A.B.C's propagate flag is True so parent logger A.B handlers are
invoked. Handler h3's level is DEBUG, it passes both the level and
filter check thus h3 emits the message. Processing now stops because
logger A.B's propagate flag is False.
Now let's see what would happen if a warning message was posted on
logger A.D. It's effective level is WARNING because logger A.D does
not have a level set, it's only ancestor is logger A, the root
logger which has a level of WARNING, thus logger's A.D effective
level is WARNING. Logger A.D has no handlers, it's propagate flag is
True so the message is passed to it's parent logger A, the root
logger. Logger A has two handlers h1 and h2. The level of h1 is
ERROR so the warning message is discarded by h1, nothing is emitted
by h1. Next handler h2 is invoked, it's level is WARNING so it
passes both the level check and the filter check, thus h2 emits the
warning message.
How to configure independent logging spaces:
--------------------------------------------
A common idiom is to hang all handlers off the root logger and set
the root loggers level to the desired verbosity. But this simplistic
approach runs afoul of several problems, in particular who controls
logging (accomplished by configuring the root logger). The usual
advice is to check and see if the root logger has any handlers set,
if so someone before you has configured logging and you should
inherit their configuration, all you do is add your own loggers
without any explicitly set level. If the root logger doesn't have
handlers set then you go ahead and configure the root logger to your
preference. The idea here is if your code is being loaded by another
application you want to defer to that applications logging
configuration but if your code is running stand-alone you need to
set up logging yourself.
But sometimes your code really wants it's own logging configuration
managed only by yourself completely independent of any logging
configuration by someone who may have loaded your code. Even if you
code is not designed to be loaded as a package or module you may be
faced with this problem. A trivial example of this is running your
code under a unit test framework which itself uses the logging
facility (remember there is only ever one root logger in any Python
process).
Fortunately there is a simple way to accommodate this. All you need
to do is create a "fake" root in the logging hierarchy which belongs
to you. You set your fake root's propagate flag to False, set a
level on it and you'll hang your handlers off this fake root. Then
when you create your loggers each should be a descendant of this
fake root. Now you've completely isolated yourself in the logging
hierarchy and won't be influenced by any other logging
configuration. As an example let's say your your code is called
'foo' and so you name your fake root logger 'foo'.::
my_root = logging.getLogger('foo') # child of the root logger
my_root.propagate = False
my_root.setLevel(logging.DEBUG)
my_root.addHandler(my_handler)
Then every logger you create should have 'foo.' prepended to it's
name. If you're logging my module your module's logger would be
created like this::
module_logger = logging.getLogger('foo.%s' % __module__)
If you're logging by class then your class logger would be::
class_logger = logging.getLogger('foo.%s.%s' % (self.__module__, self.__class__.__name__))
How to set levels:
------------------
An instinctive or simplistic assumption is to set the root logger to a
high logging level, for example ERROR. After all you don't want to be
spamming users with debug and info messages. Let's also assume you've
got two handlers, one for a file and one for the console, both
attached to the root logger (a common configuration) and you haven't
set the level on either handler (in which case the handler will emit
all levels).
But now let's say you want to turn on debugging, but just to the file,
the console should continue to only emit error messages.
You set the root logger's level to DEBUG. The first thing you notice is
that you're getting debug message both in the file and on the console
because the console's handler does not have a level set. Not what you
want.
So you go back restore the root loggers level back to it's original
ERROR level and set the file handler's level to DEBUG and the console
handler's level to ERROR. Now you don't get any debug messages because
the root logger is blocking all messages below the level of ERROR and
doesn't invoke any handlers. The file handler attached to the root
logger even though it's level is set to DEBUG never gets a chance to
process the message.
*IMPORTANT:* You have to set the logger's level to the minimum of all
the attached handler's levels, otherwise the logger may block the
message from ever reaching any handler.
In this example the root logger's level must be set to DEBUG, the file
handler's level to DEBUG, and the console handler's level set to
ERROR.
Now let's take a more real world example which is a bit more
complicated. It's typical to assign loggers to every major class. In
fact this is the design strategy of Java logging from which the Python
logging is modeled. In a large complex application or library that
means dozens or possibly hundreds of loggers. Now lets say you need to
trace what is happening with one class. If you use the simplistic
configuration outlined above you'll set the log level of the root
logger and one of the handlers to debug. Now you're flooded with debug
message from every logger in the system when all you wanted was the
debug messages from just one class.
How can you get fine grained control over which loggers emit debug
messages? Here are some possibilities:
(1) Set a filter.
.................
When a message is propagated to a logger in the hierarchy first the
loggers level is checked. If logger level passes then the logger
iterates over every handler attached to the logger first checking the
handler level. If the handler level check passes then the filters
attached to the handler are run.
Filters are passed the record (i.e. the message), it does not have
access to either the logger or handler it's executing within. You
can't just set the filter to only pass the records of the classes you
want to debug because that would block other important info, warning,
error and critical messages from other classes. The filter would have
to know about the "global" log level which is in effect and also pass
any messages at that level or higher. It's unfortunate the filter
cannot know the level of the logger or handler it's executing inside
of.
Also logger filters only are applied to the logger they are attached
to, i.e. the logger the message was generated on. They do not get
applied to any ancestor loggers. That means you can't just set a
filter on the root logger. You have to either set the filters on the
handlers or on every logger created.
The filter first checks the level of the message record. If it's
greater than debug it passes it. For debug messages it checks the set
of loggers which have debug messages enabled, if the message record
was generated on one of those loggers it passes the record, otherwise
it blocks it.
The only question is whether you attach the filter to every logger or
to a handful of handlers. The advantage of attaching the filter to
every logger is efficiency, the time spent handling the message can be
short circuited much sooner if the message is filtered earlier in the
process. The advantage of attaching the filter to a handler is
simplicity, you only have to do that when a handler is created, not
every place in the code where a logger is created.
(2) Conditionally set the level of each logger.
...............................................
When loggers are created a check is performed to see if the logger is
in the set of loggers for which debug information is desired, if so
it's level is set to DEBUG, otherwise it's set to the global
level. One has to recall there really isn't a single global level if
you want some handlers to emit info and above, some handlers error and
above, etc. In this case if the logger is not in the set of logger's
emitting debug the logger level should be set to the next increment
above debug level.
A good question to ask would be why not just leave the logger's level
unset if it's not in the set of loggers to be debugged? After all it
will just inherit the root level right? There are two problems with
that. 1) It wold actually inherit the level any ancestor logger and if
an ancestor was set to debug you've effectively turned on debugging
for all children of that ancestor logger. There are times you might
want that behavior, where all your children inherit your level, but
there are many cases where that's not the behavior you want. 2) A more
pernicious problem exists. The logger your handlers are attached to
MUST be set to debug level, otherwise your debug messages will never
reach the handlers for output. Thus if you leave a loggers level unset
and let it inherit it's effective level from an ancestor it might very
well inherit the debug level from the root logger. That means you've
completely negated your attempt to selectively set debug logging on
specific loggers. Bottom line, you really have to set the level on
every logger created if you want fine grained control.
Approach 2 has some distinct performance advantages. First of all
filters are not used, this avoids a whole processing step and extra
filter function calls on every message. Secondly a logger level check
is a simple integer compare which is very efficient. Thirdly the
processing of a message can be short circuited very early in the
processing pipeline, no ancestor loggers will be invoked and no
handlers will be invoked.
The downside is some added complexity at logger creation time. But
this is easily mitigated by using a utility function or method to
create the logger instead of just calling logger.getLogger().
Like every thing else in computer science which approach you take boils
down to a series of trade offs, most around how your code is
organized. You might find it easier to set a filter on just one or two
handlers. It might be easier to modify the configuration during
execution if the logic is centralized in just a filter function, but
don't let that sway you too much because it's trivial to iterate over
every logger and dynamically reset it's log level.
Now at least you've got a basic understanding of how this stuff hangs
together and what your options are. That's not insignificant, when I
was first introduced to logging in Java and Python I found it
bewildering difficult to get it do what I wanted.
John Dennis <[email protected]>
'''
from __future__ import print_function
#-------------------------------------------------------------------------------
import sys
import os
import pwd
import logging
import re
import time
import six
#-------------------------------------------------------------------------------
# Default format
LOGGING_DEFAULT_FORMAT = '%(levelname)s %(message)s'
# Maps a logging level name to it's numeric value
log_level_name_map = {
'notset' : logging.NOTSET,
'debug' : logging.DEBUG,
'info' : logging.INFO,
'warn' : logging.WARNING,
'warning' : logging.WARNING,
'error' : logging.ERROR,
'critical' : logging.CRITICAL
}
log_levels = (logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)
logger_method_names = ('debug', 'info', 'warning', 'error', 'exception', 'critical')
#-------------------------------------------------------------------------------
def get_unique_levels(iterable):
'''
Given a iterable of objects containing a logging level return a
ordered list (min to max) of unique levels.
:parameters:
iterable
Iterable yielding objects with a logging level attribute.
:returns:
Ordered list (min to max) of unique levels.
'''
levels = set()
for obj in iterable:
level = getattr(obj, 'level', sys.maxsize)
if level != logging.NOTSET:
levels.add(level)
levels = list(levels)
levels.sort()
return levels
def get_minimum_level(iterable):
'''
Given a iterable of objects containing a logging level return the
minimum level. If no levels are defined return maxsize.
set of unique levels.
:parameters:
iterable
Iterable yielding objects with a logging level attribute.
:returns:
Ordered list (min to max) of unique levels.
'''
min_level = sys.maxsize
for obj in iterable:
level = getattr(obj, 'level', sys.maxsize)
if level != logging.NOTSET:
if level < min_level:
min_level = level
return min_level
def parse_log_level(level):
'''
Given a log level either as a string or integer
return a numeric logging level. The following case insensitive
names are recognized::
* notset
* debug
* info
* warn
* warning
* error
* critical
A string containing an integer is also recognized, for example
``"10"`` would map to ``logging.DEBUG``
The integer value must be the range [``logging.NOTSET``,
``logging.CRITICAL``] otherwise a value exception will be raised.
:parameters:
level
basestring or integer, level value to convert
:returns:
integer level value
'''
# Is it a string representation of an integer?
# If so convert to an int.
if isinstance(level, six.string_types):
try:
level = int(level)
except ValueError:
pass
# If it's a string lookup it's name and map to logging level
# otherwise validate the integer value is in range.
if isinstance(level, six.string_types):
result = log_level_name_map.get(level.lower()) #pylint: disable=E1103
if result is None:
raise ValueError('unknown log level (%s)' % level)
return result
elif isinstance(level, int):
if level < logging.NOTSET or level > logging.CRITICAL:
raise ValueError('log level (%d) out of range' % level)
return level
else:
raise TypeError('log level must be basestring or int, got (%s)' % type(level))
#-------------------------------------------------------------------------------
def logging_obj_str(obj):
'''
Unfortunately the logging Logger and Handler classes do not have a
custom __str__() function which converts the object into a human
readable string representation. This function takes any object
with a level attribute and outputs the objects name with it's
associated level. If a name was never set for the object then it's
repr is used instead.
:parameters:
obj
Object with a logging level attribute
:returns:
string describing the object
'''
name = getattr(obj, 'name', repr(obj))
text = '"%s" [level=%s]' % (name, logging.getLevelName(obj.level))
if isinstance(obj, logging.FileHandler):
text += ' filename="%s"' % obj.baseFilename
return text
#-------------------------------------------------------------------------------
class LogManager(object):
'''
This class wraps the functionality in the logging module to
provide an easier to use API for logging while providing advanced
features including a independent namespace. Each application or
library wishing to have it's own logging namespace should instantiate
exactly one instance of this class and use it to manage all it's
logging.
Traditionally (or simplistically) logging was set up with a single
global root logger with output handlers bound to it. The global
root logger (whose name is the empty string) was shared by all
code in a loaded process. The only the global unamed root logger
had a level set on it, all other loggers created inherited this
global level. This can cause conflicts in more complex scenarios
where loaded code wants to maintain it's own logging configuration
independent of whomever loaded it's code. By using only a single
logger level set on the global root logger it was not possible to
have fine grained control over individual logger output. The
pattern seen with this simplistic setup has been frequently copied
despite being clumsy and awkward. The logging module has the tools
available to support a more sophisitcated and useful model, but it
requires an overarching framework to manage. This class provides
such a framework.
The features of this logging manager are:
* Independent logging namespace.
* Simplifed method to create handlers.
* Simple setup for applications with command line args.
* Sophisitcated handler configuration
(e.g. file ownership & permissions)
* Easy fine grained control of logger output
(e.g. turning on debug for just 1 or 2 loggers)
* Holistic management of the interrelationships between
logging components.
* Ability to dynamically adjust logging configuration in
a running process.
An independent namespace is established by creating a independent
root logger for this manager (root_logger_name). This root logger
is a direct child of the global unamed root logger. All loggers
created by this manager will be descendants of this managers root
logger. The managers root logger has it's propagate flag set
to False which means all loggers and handlers created by this
manager will be isolated in the global logging tree.
Log level management:
---------------------
Traditionally loggers inherited their logging level from the root
logger. This was simple but made it impossible to independently
control logging output from different loggers. If you set the root
level to DEBUG you got DEBUG output from every logger in the
system, often overwhelming in it's voluminous output. Many times
you want to turn on debug for just one class (a common idom is to
have one logger per class). To achieve the fine grained control
you can either use filters or set a logging level on every logger
(see the module documentation for the pros and cons). This manager
sets a log level on every logger instead of using level
inheritence because it's more efficient at run time.
Global levels are supported via the verbose and debug flags
setting every logger level to INFO and DEBUG respectively. Fine
grained level control is provided via regular expression matching
on logger names (see `configure()` for the details. For
example if you want to set a debug level for the foo.bar logger
set a regular expression to match it and bind it to the debug
level. Note, the global verbose and debug flags always override
the regular expression level configuration. Do not set these
global flags if you want fine grained control.
The manager maintains the minimum level for all loggers under it's
control and the minimum level for all handlers under it's
control. The reason it does this is because there is no point in
generating debug messages on a logger if there is no handler
defined which will output a debug message. Thus when the level is
set on a logger it takes into consideration the set of handlers
that logger can emit to.
IMPORTANT: Because the manager maintains knowledge about all the
loggers and handlers under it's control it is essential you use
only the managers interface to modify a logger or handler and not
set levels on the objects directly, otherwise the manger will not
know to visit every object under it's control when a configuraiton
changes (see '`LogManager.apply_configuration()`).
Example Usage::
# Create a log managers for use by 'my_app'
log_mgr = LogManager('my_app')
# Create a handler to send error messages to stderr
log_mgr.create_log_handlers([dict(stream=sys.stdout,
level=logging.ERROR)])
# Create logger for a class
class Foo(object):
def __init__(self):
self.log = log_mgr.get_logger(self)
'''
def __init__(self, root_logger_name='', configure_state=None):
'''
Create a new LogManager instance using root_logger_name as the
parent of all loggers maintained by the manager.
Only one log manger should be created for each logging namespace.
:parameters:
root_logger_name
The name of the root logger. All loggers will be prefixed
by this name.
configure_state
Used by clients of the log manager to track the
configuration state, may be any object.
:return:
LogManager instance
'''
self.loggers = {} # dict, key is logger name, value is logger object
self.handlers = {} # dict, key is handler name, value is handler object
self.configure_state = configure_state
self.root_logger_name = root_logger_name
self.default_level = 'error'
self.debug = False
self.verbose = False
self.logger_regexps = []
self.root_logger = self.get_logger(self.root_logger_name)
# Stop loggers and handlers from searching above our root
self.root_logger.propagate = False
def _get_default_level(self):
return self._default_level
def _set_default_level(self, value):
level = parse_log_level(value)
self._default_level = level
self.apply_configuration()
default_level = property(_get_default_level, _set_default_level,
doc='see log_manager.parse_log_level()` for details on how the level can be specified during assignement.')
def set_default_level(self, level, configure_state=None):
'''
Reset the default logger level, updates all loggers.
Note, the default_level may also be set by assigning to the
default_level attribute but that does not update the configure_state,
this method is provided as a convenience to simultaneously set the
configure_state if so desired.
:parameters:
level
The new default level for the log manager. See
`log_manager.parse_log_level()` for details on how the
level can be specified.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
level = parse_log_level(level)
self._default_level = level
self.apply_configuration(configure_state)
def __str__(self):
'''
When str() is called on the LogManager output it's state.
'''
text = ''
text += 'root_logger_name: %s\n' % (self.root_logger_name)
text += 'configure_state: %s\n' % (self.configure_state)
text += 'default_level: %s\n' % (logging.getLevelName(self.default_level))
text += 'debug: %s\n' % (self.debug)
text += 'verbose: %s\n' % (self.verbose)
text += 'number of loggers: %d\n' % (len(self.loggers))
loggers = [logging_obj_str(x) for x in self.loggers.values()]
loggers.sort()
for logger in loggers:
text += ' %s\n' % (logger)
text += 'number of handlers: %d\n' % (len(self.handlers))
handlers = [logging_obj_str(x) for x in self.handlers.values()]
handlers.sort()
for handler in handlers:
text += ' %s\n' % (handler)
text += 'number of logger regexps: %d\n' % (len(self.logger_regexps))
for regexp, level in self.logger_regexps:
text += ' "%s" => %s\n' % (regexp, logging.getLevelName(level))
return text
def configure(self, config, configure_state=None):
'''
The log manager is initialized from key,value pairs in the
config dict. This may be called any time to modify the
logging configuration at run time.
The supported entries in the config dict are:
default_level
The default level applied to a logger when not indivdually
configured. The verbose and debug config items override
the default level. See `log_manager.parse_log_level()` for
details on how the level can be specified.
verbose
Boolean, if True sets default_level to INFO.
debug
Boolean, if True sets default_level to DEBUG.
logger_regexps
List of (regexp, level) tuples. This is a an ordered list
regular expressions used to match against a logger name to
configure the logger's level. The first regexp in the
sequence which matches the logger name will use the the
level bound to that regexp to set the logger's level. If
no regexp matches the logger name then the logger will be
assigned the default_level.
The regular expression comparision is performed with the
re.search() function which means the match can be located
anywhere in the name string (as opposed to the start of
the string). Do not forget to escape regular
expression metacharacters when appropriate. For example
dot ('.') is used to seperate loggers in a logging
hierarchy path (e.g. a.b.c)
Examples::
# To match exactly the logger a.b.c and set it to DEBUG:
logger_regexps = [(r'^a\.b\.c$', 'debug')]
# To match any child of a.b and set it to INFO:
logger_regexps = [(r'^a\.b\..*', 'info')]
# To match any leaf logger with the name c and set it to level 5:
logger_regexps = [(r'\.c$', 5)]
handlers
List of handler config dicts or (config, logger)
tuples. See `create_log_handlers()` for details
of a hanlder config.
The simple form where handlers is a list of dicts each
handler is bound to the log mangers root logger (see
`create_log_handlers()` optional ``logger``
parameter). If you want to bind each handler to a specific
logger other then root handler then group the handler config
with a logger in a (config, logger) tuple. The logger may be
either a logger name or a logger instance. The following are
all valid methods of passing handler configuration.::
# List of 2 config dicts; both handlers bound to root logger
[{}, {}]
# List of 2 tuples; first handler bound to logger_name1
# by name, second bound to logger2 by object.
[({}, 'logger_name1'), ({}, logger2']
# List of 1 dict, 1 tuple; first bound to root logger,
# second bound to logger_name by name
[{}, ({}, 'logger_name']
:parameters:
config
Dict of <key,value> pairs describing the configuration.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
for attr in ('debug', 'verbose', 'logger_regexps'):
value = config.get(attr)
if value is not None:
setattr(self, attr, value)
attr = 'default_level'
value = config.get(attr)
if value is not None:
try:
level = parse_log_level(value)
except Exception as e:
raise ValueError("could not set %s (%s)" % (attr, e))
setattr(self, attr, level)
attr = 'handlers'
handlers = config.get(attr)
if handlers is not None:
for item in handlers:
logger = self.root_logger
config = None
if isinstance(item, dict):
config = item
elif isinstance(item, tuple):
if len(item) != 2:
raise ValueError('handler tuple must have exactly 2 items, got "%s"' % item)
config = item[0]
logger = item[1]
else:
raise TypeError('expected dict or tuple for handler item, got "%s", handlers=%s' % \
type(item), value)
if not isinstance(config, dict):
raise TypeError('expected dict for handler config, got "%s"', type(config))
if isinstance(logger, six.string_types):
logger = self.get_logger(logger)
else:
if not isinstance(logger, logging.Logger):
raise TypeError('expected logger name or logger object in %s' % item)
self.create_log_handlers([config], logger, configure_state)
if self.verbose:
self.default_level = logging.INFO
if self.debug:
self.default_level = logging.DEBUG
self.apply_configuration(configure_state)
def create_log_handlers(self, configs, logger=None, configure_state=None):
'''
Create new handlers and attach them to a logger (log mangers
root logger by default).
*Note, you may also pass the handler configs to `LogManager.configure()`.*
configs is an iterable yielding a dict. Each dict configures a
handler. Currently two types of handlers are supported:
* stream
* file
Which type of handler is created is determined by the presence of
the ``stream`` or ``filename`` in the dict.
Configuration keys:
===================
Handler type keys:
------------------
Exactly of the following must present in the config dict:
stream
Use the specified stream to initialize the StreamHandler.
filename
Specifies that a FileHandler be created, using the specified
filename.
log_handler
Specifies a custom logging.Handler to use
Common keys:
------------
name
Set the name of the handler. This is optional but can be
useful when examining the logging configuration.
For files defaults to ``'file:absolute_path'`` and for streams
it defaults to ``'stream:stream_name'``
format
Use the specified format string for the handler.
time_zone_converter
Log record timestamps are seconds since the epoch in the UTC
time zone stored as floating point values. When the formatter
inserts a timestamp via the %(asctime)s format substitution it
calls a time zone converter on the timestamp which returns a
time.struct_time value to pass to the time.strftime function
along with the datefmt format conversion string. The time
module provides two functions with this signature,
time.localtime and time.gmtime which performs a conversion to
local time and UTC respectively. time.localtime is the default
converter. Setting the time zone converter to time.gmtime is
appropriate for date/time strings in UTC. The
time_zone_converter attribute may be any function with the
correct signature. Or as a convenience you may also pass a
string which will select either the time.localtime or the
time.gmtime converter. The case insenstive string mappings
are::
'local' => time.localtime
'localtime' => time.localtime
'gmt' => time.gmtime
'gmtime' => time.gmtime
'utc' => time.gmtime
datefmt
Use the specified time.strftime date/time format when
formatting a timestamp via the %(asctime)s format
substitution. The timestamp is first converted using the
time_zone_converter to either local or UTC
level
Set the handler logger level to the specified level. May be
one of the following strings: 'debug', 'info', 'warn',
'warning', 'error', 'critical' or any of the logging level
constants. Thus level='debug' is equivalent to
level=logging.DEBUG. Defaults to self.default_level.
File handler keys:
------------------
filemode
Specifies the mode to open the file. Defaults to 'a' for
append, use 'w' for write.
permission
Set the permission bits on the file (i.e. chmod).
Must be a valid integer (e.g. 0660 for rw-rw----)
user
Set the user owning the file. May be either a numeric uid or a
basestring with a user name in the passwd file.
group
Set the group associated with the file, May be either a
numeric gid or a basestring with a group name in the groups
file.
Examples:
---------
The following shows how to set two handlers, one for a file
(ipa.log) at the debug log level and a second handler set to
stdout (e.g. console) at the info log level. (One handler sets it
level with a simple name, the other with a logging constant just
to illustrate the flexibility) ::
# Get a root logger
log_mgr = LogManger('my_app')
# Create the handlers
log_mgr.create_log_handlers([dict(filename='my_app.log',
level='info',
user='root',
group='root',
permission=0600,
time_zone_converter='utc',
datefmt='%Y-%m-%dT%H:%M:%SZ', # ISO 8601
format='<%(levelname)s> [%(asctime)s] module=%(name)s "%(message)s"'),
dict(stream=sys.stdout,
level=logging.ERROR,
format='%(levelname)s: %(message)s')])
# Create a logger for my_app.foo.bar
foo_bar_log = log_mgr.get_logger('foo.bar')
root_logger.info("Ready to process requests")
foo_bar_log.error("something went boom")
In the file my_app.log you would see::
<INFO> [2011-10-26T01:39:00Z] module=my_app "Ready to process requests"
<ERROR> [2011-10-26T01:39:00Z] module=may_app.foo.bar "something went boom"
On the console you would see::
ERROR: something went boom
:parameters:
configs
Sequence of dicts (any iterable yielding a dict). Each
dict creates one handler and contains the configuration
parameters used to create that handler.
logger
If unspecified the handlers will be attached to the
LogManager.root_logger, otherwise the handlers will be
attached to the specified logger.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
:return:
The list of created handers.
'''
if logger is None:
logger = self.root_logger
handlers = []
# Iterate over handler configurations.
for cfg in configs:
# Type of handler?
filename = cfg.get('filename')
stream = cfg.get("stream")
log_handler = cfg.get("log_handler")
if filename:
if "stream" in cfg:
raise ValueError("both filename and stream are specified, must be one or the other, config: %s" % cfg)
path = os.path.abspath(filename)
filemode = cfg.get('filemode', 'a')
handler = logging.FileHandler(path, filemode)
# Set the handler name
name = cfg.get("name")
if name is None:
name = 'file:%s' % (path)
handler.name = name
# Path should now exist, set ownership and permissions if requested.
# Set uid, gid (e.g. chmod)
uid = gid = None
user = cfg.get('user')
group = cfg.get('group')
if user is not None:
if isinstance(user, six.string_types):
pw = pwd.getpwnam(user)
uid = pw.pw_uid
elif isinstance(user, int):
uid = user
else:
raise TypeError("user (%s) is not int or basestring" % user)
if group is not None:
if isinstance(group, six.string_types):
pw = pwd.getpwnam(group)
gid = pw.pw_gid
elif isinstance(group, int):
gid = group
else:
raise TypeError("group (%s) is not int or basestring" % group)
if uid is not None or gid is not None:
if uid is None:
uid = -1
if gid is None:
gid = -1
os.chown(path, uid, gid)
# Set file permissions (e.g. mode)
permission = cfg.get('permission')
if permission is not None:
os.chmod(path, permission)
elif stream:
handler = logging.StreamHandler(stream)
# Set the handler name
name = cfg.get("name")
if name is None:
name = 'stream:%s' % (stream)
handler.name = name
elif log_handler:
handler = log_handler
else:
raise ValueError(
"neither file nor stream nor log_handler specified in "
"config: %s" % cfg)
# Add the handler
handlers.append(handler)
# Configure message formatting on the handler
format = cfg.get("format", LOGGING_DEFAULT_FORMAT)
datefmt = cfg.get("datefmt", None)
formatter = logging.Formatter(format, datefmt)
time_zone_converter = cfg.get('time_zone_converter', time.localtime)
if isinstance(time_zone_converter, six.string_types):
converter = {'local' : time.localtime,
'localtime' : time.localtime,
'gmt' : time.gmtime,
'gmtime' : time.gmtime,
'utc' : time.gmtime}.get(time_zone_converter.lower())
if converter is None:
raise ValueError("invalid time_zone_converter name (%s)" % \
time_zone_converter)
elif callable(time_zone_converter):
converter = time_zone_converter
else:
raise ValueError("time_zone_converter must be basestring or callable, not %s" % \
type(time_zone_converter))
formatter.converter = converter
handler.setFormatter(formatter)
# Set the logging level
level = cfg.get('level')
if level is not None:
try:
level = parse_log_level(level)
except Exception as e:
print('could not set handler log level "%s" (%s)' % (level, e), file=sys.stderr)
level = None
if level is None:
level = self.default_level
handler.setLevel(level)
for handler in handlers:
if handler.name in self.handlers:
raise ValueError('handler "%s" already exists' % handler.name)
logger.addHandler(handler)
self.handlers[handler.name] = handler
self.apply_configuration(configure_state)
return handlers
def get_handler(self, handler_name):
'''
Given a handler name return the handler object associated with
it.
:parameters:
handler_name
Name of the handler to look-up.
:returns:
The handler object associated with the handler name.
'''
handler = self.handlers.get(handler_name)
if handler is None:
raise KeyError('handler "%s" is not defined' % handler_name)
return handler
def set_handler_level(self, handler_name, level, configure_state=None):
'''
Given a handler name, set the handler's level, return previous level.
:parameters:
handler_name
Name of the handler to look-up.
level
The new level for the handler. See
`log_manager.parse_log_level()` for details on how the
level can be specified.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
:returns:
The handler's previous level
'''
handler = self.get_handler(handler_name)
level = parse_log_level(level)
prev_level = handler.level
handler.setLevel(level)
self.apply_configuration(configure_state)
return prev_level
def get_loggers_with_handler(self, handler):
'''
Given a handler return a list of loggers that hander is bound to.
:parameters:
handler
The name of a handler or a handler object.
:returns:
List of loggers with the handler is bound to.
'''
if isinstance(handler, six.string_types):
handler = self.get_handler(handler)
elif isinstance(handler, logging.Handler):
if not handler in self.handlers.values():
raise ValueError('handler "%s" is not managed by this log manager' % \
logging_obj_str(handler))
else:
raise TypeError('handler must be basestring or Handler object, got %s' % type(handler))
loggers = []
for logger in self.loggers.values():
if handler in logger.handlers:
loggers.append(logger)
return loggers
def remove_handler(self, handler, logger=None, configure_state=None):
'''
Remove the named handler. If logger is unspecified the handler
will be removed from all managed loggers, otherwise it will be
removed from only the specified logger.
:parameters:
handler
The name of the handler to be removed or the handler object.
logger
If unspecified the handler is removed from all loggers,
otherwise the handler is removed from only this logger.
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
if isinstance(handler, six.string_types):
handler = self.get_handler(handler)
elif not isinstance(handler, logging.Handler):
raise TypeError('handler must be basestring or Handler object, got %s' % type(handler))
handler_name = handler.name
if handler_name is None:
raise ValueError('handler "%s" does not have a name' % logging_obj_str(handler))
loggers = self.get_loggers_with_handler(handler)
if logger is None:
for logger in loggers:
logger.removeHandler(handler)
del self.handlers[handler_name]
else:
if not logger in loggers:
raise ValueError('handler "%s" is not bound to logger "%s"' % \
(handler_name, logging_obj_str(logger)))
logger.removeHandler(handler)
if len(loggers) == 1:
del self.handlers[handler_name]
self.apply_configuration(configure_state)
def apply_configuration(self, configure_state=None):
'''
Using the log manager's internal configuration state apply the
configuration to all the objects managed by the log manager.
:parameters:
configure_state
If other than None update the log manger's configure_state
variable to this object. Clients of the log manager can
use configure_state to track the state of the log manager.
'''
if configure_state is not None:
self.configure_state = configure_state
for logger in self.loggers.values():
self._set_configured_logger_level(logger)
def get_configured_logger_level(self, name):
'''
Given a logger name return it's level as defined by the
`LogManager` configuration.
:parameters:
name
logger name
:returns:
log level
'''
level = self.default_level
for regexp, config_level in self.logger_regexps:
if re.search(regexp, name):
level = config_level
break
level = parse_log_level(level)
return level
def get_logger_handlers(self, logger):
'''
Return the set of unique handlers visible to this logger.
:parameters:
logger
The logger whose visible and enabled handlers will be returned.
:return:
Set of handlers
'''
handlers = set()
while logger:
for handler in logger.handlers:
handlers.add(handler)
if logger.propagate:
logger = logger.parent
else:
logger = None
return handlers
def get_minimum_handler_level_for_logger(self, logger):
'''
Return the minimum handler level of all the handlers the
logger is exposed to.
:parameters:
logger
The logger whose handlers will be examined.
:return:
The minimum of all the handler's levels. If no
handlers are defined sys.maxsize will be returned.
'''
handlers = self.get_logger_handlers(logger)
min_level = get_minimum_level(handlers)
return min_level
def _set_configured_logger_level(self, logger):
'''
Based on the current configuration maintained by the log
manager set this logger's level.
If the level specified for this logger by the configuration is
less than the minimum level supported by the output handlers
the logger is exposed to then adjust the logger's level higher
to the minimum handler level. This is a performance
optimization, no point in emitting a log message if no
handlers will ever output it.
:parameters:
logger
The logger whose level is being configured.
:return:
The level actually set on the logger.
'''
level = self.get_configured_logger_level(logger.name)
minimum_handler_level = self.get_minimum_handler_level_for_logger(logger)
if level < minimum_handler_level:
level = minimum_handler_level
logger.setLevel(level)
return level
def get_logger(self, who, bind_logger_names=False):
'''
Return the logger for an object or a name. If the logger
already exists return the existing instance otherwise create
the logger.
The who parameter may be either a name or an object.
Loggers are identified by a name but because loggers are
usually bound to a class this method is optimized to handle
that case. If who is an object:
* The name object's module name (dot seperated) and the
object's class name.
* Optionally the logging output methods can be bound to the
object if bind_logger_names is True.
Otherwise if who is a basestring it is used as the logger
name.
In all instances the root_logger_name is prefixed to every
logger created by the manager.
:parameters:
who
If a basestring then use this as the logger name,
prefixed with the root_logger_name. Otherwise who is treated
as a class instance. The logger name is formed by prepending
the root_logger_name to the module name and then appending the
class name. All name components are dot seperated. Thus if the
root_logger_name is 'my_app', the class is ParseFileConfig
living in the config.parsers module the logger name will be:
``my_app.config.parsers.ParseFileConfig``.
bind_logger_names
If true the class instance will have the following bound
to it: ``log``, ``debug()``, ``info()``, ``warning()``,
``error()``, ``exception()``, ``critical()``. Where log is
the logger object and the others are the loggers output
methods. This is a convenience which allows you emit
logging messages directly, for example::
self.debug('%d names defined', self.num_names).
:return:
The logger matching the name indicated by who. If the
logger pre-existed return that instance otherwise create the
named logger return it.
'''
is_object = False
if isinstance(who, six.string_types):
obj_name = who
else:
is_object = True
obj_name = '%s.%s' % (who.__module__, who.__class__.__name__)
if obj_name == self.root_logger_name:
logger_name = obj_name
else:
logger_name = self.root_logger_name + '.' + obj_name
# If logger not in our cache then create and initialize the logger.
logger = self.loggers.get(logger_name)
if logger is None:
logger = logging.getLogger(logger_name)
self.loggers[logger_name] = logger
self._set_configured_logger_level(logger)
if bind_logger_names and is_object and getattr(who, '__log_manager', None) is None:
setattr(who, '__log_manager', self)
method = 'log'
if hasattr(who, method):
raise ValueError('%s is already bound to %s' % (method, repr(who)))
setattr(who, method, logger)
for method in logger_method_names:
if hasattr(who, method):
raise ValueError('%s is already bound to %s' % (method, repr(who)))
setattr(who, method, getattr(logger, method))
return logger
| tbabej/freeipa | ipapython/log_manager.py | Python | gpl-3.0 | 62,638 |
"""
Created by Emille Ishida in May, 2015.
Class to implement calculations on data matrix.
"""
import os
import sys
import matplotlib.pylab as plt
import numpy as np
from multiprocessing import Pool
from snclass.treat_lc import LC
from snclass.util import read_user_input, read_snana_lc, translate_snid
from snclass.functions import core_cross_val, screen
##############################################
class DataMatrix(object):
"""
Data matrix class.
Methods:
- build: Build data matrix according to user input file specifications.
- reduce_dimension: Perform dimensionality reduction.
- cross_val: Perform cross-validation.
Attributes:
- user_choices: dict, user input choices
- snid: vector, list of objects identifiers
- datam: array, data matrix for training
- redshift: vector, redshift for training data
- sntype: vector, classification of training data
- low_dim_matrix: array, data matrix in KernelPC space
- transf_test: function, project argument into KernelPC space
- final: vector, optimize parameter values
"""
def __init__(self, input_file=None):
"""
Read user input file.
input: input_file -> str
name of user input file
"""
self.datam = None
self.snid = []
self.redshift = None
self.sntype = None
self.low_dim_matrix = None
self.transf_test = None
self.final = None
self.test_projection = []
if input_file is not None:
self.user_choices = read_user_input(input_file)
def check_file(self, filename, epoch=True, ref_filter=None):
"""
Construct one line of the data matrix.
input: filename, str
file of raw data for 1 supernova
epoch, bool - optional
If true, check if SN satisfies epoch cuts
Default is True
ref_filter, str - optional
Reference filter for peak MJD calculation
Default is None
"""
screen('Fitting ' + filename, self.user_choices)
# translate identifier
self.user_choices['path_to_lc'] = [translate_snid(filename, self.user_choices['photon_flag'][0])[0]]
# read light curve raw data
raw = read_snana_lc(self.user_choices)
# initiate light curve object
lc_obj = LC(raw, self.user_choices)
# load GP fit
lc_obj.load_fit_GP(self.user_choices['samples_dir'][0] + filename)
# normalize
lc_obj.normalize(ref_filter=ref_filter)
# shift to peak mjd
lc_obj.mjd_shift()
if epoch:
# check epoch requirements
lc_obj.check_epoch()
else:
lc_obj.epoch_cuts = True
if lc_obj.epoch_cuts:
# build data matrix lines
lc_obj.build_steps()
# store
obj_line = []
for fil in self.user_choices['filters']:
for item in lc_obj.flux_for_matrix[fil]:
obj_line.append(item)
rflag = self.user_choices['redshift_flag'][0]
redshift = raw[rflag][0]
obj_class = raw[self.user_choices['type_flag'][0]][0]
self.snid.append(raw['SNID:'][0])
return obj_line, redshift, obj_class
else:
screen('... Failed to pass epoch cuts!', self.user_choices)
screen('\n', self.user_choices)
return None
def store_training(self, file_out):
"""
Store complete training matrix.
input: file_out, str
output file name
"""
# write to file
if file_out is not None:
op1 = open(file_out, 'w')
op1.write('SNID type z LC...\n')
for i in xrange(len(self.datam)):
op1.write(str(self.snid[i]) + ' ' + str(self.sntype[i]) +
' ' + str(self.redshift[i]) + ' ')
for j in xrange(len(self.datam[i])):
op1.write(str(self.datam[i][j]) + ' ')
op1.write('\n')
op1.close()
def build(self, file_out=None, check_epoch=True, ref_filter=None):
"""
Build data matrix according to user input file specifications.
input: file_out -> str, optional
file to store data matrix (str). Default is None
check_epoch -> bool, optional
If True check if SN satisfies epoch cuts
Default is True
ref_filter -> str, optional
Reference filter for MJD calculation
Default is None
"""
# list all files in sample directory
file_list = os.listdir(self.user_choices['samples_dir'][0])
datam = []
redshift = []
sntype = []
for obj in file_list:
if 'mean' in obj:
sn_char = self.check_file(obj, epoch=check_epoch,
ref_filter=ref_filter)
if sn_char is not None:
datam.append(sn_char[0])
redshift.append(sn_char[1])
sntype.append(sn_char[2])
self.datam = np.array(datam)
self.redshift = np.array(redshift)
self.sntype = np.array(sntype)
# store results
self.store_training(file_out)
def reduce_dimension(self):
"""Perform dimensionality reduction with user defined function."""
# define dimensionality reduction function
func = self.user_choices['dim_reduction_func']
# reduce dimensionality
self.low_dim_matrix = func(self.datam, self.user_choices)
# define transformation function
self.transf_test = func(self.datam, self.user_choices, transform=True)
def cross_val(self):
"""Optimize the hyperparameters for RBF kernel and ncomp."""
# correct type parameters if necessary
types_func = self.user_choices['transform_types_func']
if types_func is not None:
self.sntype = types_func(self.sntype, self.user_choices['Ia_flag'][0])
# initialize parameters
data = self.datam
types = self.sntype
choices = self.user_choices
nparticles = self.user_choices['n_cross_val_particles']
parameters = []
for i in xrange(nparticles):
pars = {}
pars['data'] = data
pars['types'] = types
pars['user_choices'] = choices
parameters.append(pars)
if int(self.user_choices['n_proc'][0]) > 0:
cv_func = self.user_choices['cross_validation_func']
pool = Pool(processes=int(self.user_choices['n_proc'][0]))
my_pool = pool.map_async(cv_func, parameters)
try:
results = my_pool.get(0xFFFF)
except KeyboardInterrupt:
print 'Interruputed by the user!'
sys.exit()
pool.close()
pool.join()
results = np.array(results)
else:
number = self.user_choices['n_cross_val_particles']
results = np.array([core_cross_val(pars)
for pars in parameters])
flist = list(results[:,len(results[0])-1])
max_success = max(flist)
indx_max = flist.index(max_success)
self.final = {}
for i in xrange(len(self.user_choices['cross_val_par'])):
par_list = self.user_choices['cross_val_par']
self.final[par_list[i]] = results[indx_max][i]
def final_configuration(self):
"""Determine final configuraton based on cross-validation results."""
#update optimized hyper-parameters
for par in self.user_choices['cross_val_par']:
indx = self.user_choices['cross_val_par'].index(par)
self.user_choices[par] = self.final[par]
#update low dimensional matrix
self.reduce_dimension()
def plot(self, pcs, file_out, show=False, test=None):
"""
Plot 2-dimensional scatter of data matrix in kPCA space.
input: pcs, vector of int
kernel PCs to be used as horizontal and vertical axis
file_out, str
file name to store final plot
show, bool, optional
if True show plot in screen
Default is False
test, dict, optional
keywords: data, type
if not None plot the projection of 1 photometric object
Default is None
"""
#define vectors to plot
xdata = self.low_dim_matrix[:,pcs[0]]
ydata = self.low_dim_matrix[:,pcs[1]]
if '0' in self.sntype:
snIa = self.sntype == '0'
nonIa = self.sntype != '0'
else:
snIa = self.sntype == 'Ia'
snIbc = self.sntype == 'Ibc'
snII = self.sntype == 'II'
plt.figure(figsize=(10,10))
if '0' in self.sntype:
plt.scatter(xdata[nonIa], ydata[nonIa], color='purple', marker='s',
label='spec non-Ia')
plt.scatter(xdata[snIa], ydata[snIa], color='blue', marker='o',
label='spec Ia')
else:
plt.scatter(xdata[snII], ydata[snII], color='purple', marker='s',
label='spec II')
plt.scatter(xdata[snIbc], ydata[snIbc], color='green', marker='^',
s=30, label='spec Ibc')
plt.scatter(xdata[snIa], ydata[snIa], color='blue', marker='o',
label='spec Ia')
if test is not None:
if len(test.samples_for_matrix) > 0:
plt.title('prob_Ia = ' + str(round(test['prob_Ia'], 2)))
if test.raw['SIM_NON1a:'][0] == '0':
sntype = 'Ia'
else:
sntype = 'nonIa'
plt.scatter([test.test_proj[0][pcs[0]]], [test.test_proj[0][pcs[1]]],
marker='*', color='red', s=75,
label='photo ' + sntype)
plt.xlabel('kPC' + str(pcs[0] + 1), fontsize=14)
plt.ylabel('kPC' + str(pcs[1] + 1), fontsize=14)
plt.legend(fontsize=12)
if show:
plt.show()
if file_out is not None:
plt.savefig(file_out)
plt.close()
def main():
"""Print documentation."""
print __doc__
if __name__ == '__main__':
main()
| emilleishida/snclass | snclass/matrix.py | Python | gpl-3.0 | 10,669 |
__author__ = 'harsha'
class ForceReply(object):
def __init__(self, force_reply, selective):
self.force_reply = force_reply
self.selective = selective
def get_force_reply(self):
return self.force_reply
def get_selective(self):
return self.selective
def __str__(self):
return str(self.__dict__) | harsha5500/pytelegrambot | telegram/ForceReply.py | Python | gpl-3.0 | 353 |
#total_ordering_student.py
import functools
@functools.total_ordering
class Student:
def __init__(self, firstname, lastname): #姓和名
self.firstname = firstname
self.lastname = lastname
def __eq__(self, other): #判断姓名是否一致
return ((self.lastname.lower(), self.firstname.lower()) ==
(other.lastname.lower(), other.firstname.lower()))
def __lt__(self, other): #self姓名<other姓名
return ((self.lastname.lower(), self.firstname.lower()) <
(other.lastname.lower(), other.firstname.lower()))
#测试代码
if __name__ == '__main__':
s1 = Student('Mary','Clinton')
s2 = Student('Mary','Clinton')
s3 = Student('Charlie','Clinton')
print(s1==s2)
print(s1>s3)
| GH1995/tools | archives/Python_江老师给的代码/chapter09/total_ordering_student.py | Python | gpl-3.0 | 777 |
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, Response
from celery import Celery
from werkzeug.utils import secure_filename
from VideoPlayer import VideoPlayer
from subprocess import Popen
import os
app = Flask(__name__)
local = False
if local:
UPLOAD_FOLDER = '/home/dabo02/Desktop/Projects/Side_Projects/Upwork_Tom_VideoShowroom/static/video/'
else:
UPLOAD_FOLDER='/home/pi/Desktop/Upwork_Tom_VideoShowroom/static/video/'
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(24, GPIO.OUT)
app.config['CELERY_BROKER_URL'] = 'amqp://'
app.config['CELERY_RESULT_BACKEND'] = 'amqp://'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
ALLOWED_EXTENSIONS = set(['mp3', 'mp4'])
light_state = False
exit_flag = False
current_video = None
preview_video = ''
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def check_for_current():
global current_video
if not current_video:
list_of_videos = os.listdir(UPLOAD_FOLDER)
current_video = list_of_videos[0]
@celery.task
def main_routine():
vp = VideoPlayer()
while True:
mag_switch = GPIO.input(23)
if mag_switch:
if not vp.video_is_playing:
GPIO.output(24, 0)
check_for_current()
global current_video
vp.set_video(UPLOAD_FOLDER + current_video)
vp.play_video()
else:
GPIO.output(24, 1)
vp.stop_video()
@app.route('/')
def dashboard():
video_list = os.listdir(UPLOAD_FOLDER)
video_info = {}
videos = []
global current_video
global preview_video
global light_state
preview = ''
for v in video_list:
if current_video:
if current_video in v:
current = True
else:
current = False
else:
current = False
if preview_video:
if preview_video in v:
preview = v
name = v.rsplit('.', 1)[0]
video_info = {'name': name, 'id': v, 'current': current}
videos.append(video_info)
return render_template('index.html', videos=videos, preview=preview, light_state=light_state)
@app.route('/upload_video', methods=['POST'])
def upload_video():
if 'video' not in request.files:
flash('No file part')
return redirect(url_for('dashboard'))
file = request.files['video']
if file.filename == '':
flash('No selected file')
return redirect(url_for('dashboard'))
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
return redirect(url_for('dashboard'))
@app.route('/remove_video/<id>', methods=['GET'])
def remove_video(id):
video_to_remove = UPLOAD_FOLDER + '/' + id
os.remove(os.path.join(app.config['UPLOAD_FOLDER'], video_to_remove))
return redirect(url_for('dashboard'))
@app.route('/update_video/<id>', methods=['GET'])
def change_current_video(id):
new_video = id
global current_video
current_video = new_video
return redirect(url_for('dashboard'))
@app.route('/preview_video/<id>', methods=['GET'])
def preview_current_video(id):
global preview_video
preview_video = id
return redirect(url_for('dashboard'))
@app.route('/light_state/<state>', methods=['GET'])
def light_state(state):
global light_state
if state in 'True':
GPIO.output(24, 1)
light_state = True
return redirect(url_for('dashboard'))
GPIO.output(24, 0)
light_state = False
return redirect(url_for('dashboard'))
@app.route('/start')
def start_loop():
task = main_routine.apply_async()
return redirect(url_for('dashboard'))
@app.route('/reboot')
def reboot_pi():
GPIO.cleanup()
Popen('reboot', shell=True)
return '<div><h1>Rebooting Pi.....</h1></div>'
@app.route('/shutdown')
def shutdown_pi():
GPIO.cleanup()
Popen('shutdown -h now', shell=True)
return '<div><h1>Shutting Down Pi.....</h1></div>'
if __name__ == '__main__':
if local:
app.run(host='localhost', port=3000)
else:
app.run(host='0.0.0.0', port=3500)
| dabo02/Upwork_Tom_VideoShowroom | Back-End.py | Python | gpl-3.0 | 4,500 |
import todsynth
import os
import numpy
import json
import pandas
class Calibrator( object ):
'''
A todsynth.calibrator object is a container that stores coefficients
that transform RAW dac units to physical units for a given TOD.
'''
# Calibrator description.
#000000000000000000000000000000000000000000000000000000000000000000000000
name = ""
description = ""
calType = ""
# Information stored in the form of a dictionary. Careful not to abuse
# of this in the sense of using it to process data!
info = {}
#000000000000000000000000000000000000000000000000000000000000000000000000
# Calibration coefficients
coeffs = numpy.empty(0)
# Detector index to Unique Identifier array
__uid = numpy.empty(0)
def __init__( self ):
'''
self.name = name
self.description = descrp
self.calType = calType
'''
def setCoeffs( self, c , uid=None ):
'''
Set calibrator coefficients to c.
'''
# Perform numpy.copy() to avoid cross referencing stuff
self.__coeffs = numpy.copy( c )
if uid is not None:
self.__uid = numpy.copy(uid)
self.coeffs = self.coeffs[ self.__uid ]
else:
self.__uid = numpy.arange( len( self.coeffs ) )
def getCoeffs( self ):
'''
Get a *copy* of the coefficients array.
'''
return numpy.copy( self.coeffs )
def updateInfo( self, prop, value ):
'''
Update calibrator info with a pair of prop : value
'''
self.info.update( { 'prop' : value } )
def storeInPath( self , outPath ):
'''
Stores the calibrator in JSON format at the specified path.
'''
# Serialize this object
data = {
'coefficients' : self.__coeffs,
'uid' : self.__uid }
# Create PANDAS DataFrame out data
df = pandas.DataFrame( data )
# Save DataFrame to HDF5 format
df.to_csv( os.path.join(
outPath, "%s.%s.cal" % (self.name,self.calType) ),
index=False,
sep=' ',
header=True )
@classmethod
def readFromPath( cls, systemPath ):
'''
'''
self = cls()
name,caltype,_ = os.path.basename( systemPath ).split('.')
self.name = name
self.calType = caltype
self.description = ''
# Load file
calDF = pandas.read_csv(
systemPath,
header=0,
names=['coefficients', 'uid'],
delimiter=' ' )
self.setCoeffs( calDF['coefficients'], uid = calDF['uid'] )
return self
| pafluxa/todsynth | build/lib.linux-x86_64-2.7/todsynth/calibration/calibrator.py | Python | gpl-3.0 | 2,810 |
# -*- encoding: utf-8 -*-
"""Generic base class for cli hammer commands."""
import logging
from robottelo import ssh
from robottelo.cli import hammer
from robottelo.config import conf
class CLIError(Exception):
"""Indicates that a CLI command could not be run."""
class CLIReturnCodeError(Exception):
"""Indicates that a CLI command has finished with return code, different
from zero.
:param return_code: CLI command return code
:param stderr: contents of the ``stderr``
:param msg: explanation of the error
"""
def __init__(self, return_code, stderr, msg):
self.return_code = return_code
self.stderr = stderr
self.msg = msg
def __str__(self):
return self.msg
class Base(object):
"""
@param command_base: base command of hammer.
Output of recent `hammer --help`::
activation-key Manipulate activation keys.
architecture Manipulate architectures.
auth Foreman connection login/logout.
auth-source Manipulate auth sources.
capsule Manipulate capsule
compute-resource Manipulate compute resources.
content-host Manipulate content hosts on the server
content-view Manipulate content views.
docker-image Manipulate docker images
domain Manipulate domains.
environment Manipulate environments.
erratum Manipulate errata
fact Search facts.
filter Manage permission filters.
global-parameter Manipulate global parameters.
gpg Manipulate GPG Key actions on the server
host Manipulate hosts.
host-collection Manipulate host collections
hostgroup Manipulate hostgroups.
import Import data exported from a Red Hat Sat..
lifecycle-environment Manipulate lifecycle_environments
location Manipulate locations.
medium Manipulate installation media.
model Manipulate hardware models.
organization Manipulate organizations
os Manipulate operating system.
package Manipulate packages.
package-group Manipulate package groups
partition-table Manipulate partition tables.
ping Get the status of the server
product Manipulate products.
proxy Manipulate smart proxies.
puppet-class Search puppet modules.
puppet-module View Puppet Module details.
report Browse and read reports.
repository Manipulate repositories
repository-set Manipulate repository sets on the server
role Manage user roles.
sc-param Manipulate smart class parameters.
shell Interactive shell
subnet Manipulate subnets.
subscription Manipulate subscriptions.
sync-plan Manipulate sync plans
task Tasks related actions.
template Manipulate config templates.
user Manipulate users.
user-group Manage user groups.
@since: 27.Nov.2013
"""
command_base = None # each inherited instance should define this
command_sub = None # specific to instance, like: create, update, etc
command_requires_org = False # True when command requires organization-id
logger = logging.getLogger('robottelo')
@classmethod
def _handle_response(cls, response, ignore_stderr=None):
"""Verify ``return_code`` of the CLI command.
Check for a non-zero return code or any stderr contents.
:param response: a ``SSHCommandResult`` object, returned by
:mod:`robottelo.ssh.command`.
:param ignore_stderr: indicates whether to throw a warning in logs if
``stderr`` is not empty.
:returns: contents of ``stdout``.
:raises robottelo.cli.base.CLIReturnCodeError: If return code is
different from zero.
"""
if response.return_code != 0:
raise CLIReturnCodeError(
response.return_code,
response.stderr,
u'Command "{0} {1}" finished with return_code {2}\n'
'stderr contains following message:\n{3}'
.format(
cls.command_base,
cls.command_sub,
response.return_code,
response.stderr,
)
)
if len(response.stderr) != 0 and not ignore_stderr:
cls.logger.warning(
u'stderr contains following message:\n{0}'
.format(response.stderr)
)
return response.stdout
@classmethod
def add_operating_system(cls, options=None):
"""
Adds OS to record.
"""
cls.command_sub = 'add-operatingsystem'
result = cls.execute(cls._construct_command(options))
return result
@classmethod
def create(cls, options=None):
"""
Creates a new record using the arguments passed via dictionary.
"""
cls.command_sub = 'create'
if options is None:
options = {}
result = cls.execute(
cls._construct_command(options), output_format='csv')
# Extract new object ID if it was successfully created
if len(result) > 0 and 'id' in result[0]:
obj_id = result[0]['id']
# Fetch new object
# Some Katello obj require the organization-id for subcommands
info_options = {u'id': obj_id}
if cls.command_requires_org:
if 'organization-id' not in options:
raise CLIError(
'organization-id option is required for {0}.create'
.format(cls.__name__)
)
info_options[u'organization-id'] = options[u'organization-id']
new_obj = cls.info(info_options)
# stdout should be a dictionary containing the object
if len(new_obj) > 0:
result = new_obj
return result
@classmethod
def delete(cls, options=None):
"""Deletes existing record."""
cls.command_sub = 'delete'
return cls.execute(
cls._construct_command(options),
ignore_stderr=True,
)
@classmethod
def delete_parameter(cls, options=None):
"""
Deletes parameter from record.
"""
cls.command_sub = 'delete-parameter'
result = cls.execute(cls._construct_command(options))
return result
@classmethod
def dump(cls, options=None):
"""
Displays the content for existing partition table.
"""
cls.command_sub = 'dump'
result = cls.execute(cls._construct_command(options))
return result
@classmethod
def _get_username_password(cls, username=None, password=None):
"""Lookup for the username and password for cli command in following
order:
1. ``user`` or ``password`` parameters
2. ``foreman_admin_username`` or ``foreman_admin_password`` attributes
3. foreman.admin.username or foreman.admin.password configuration
:return: A tuple with the username and password found
:rtype: tuple
"""
if username is None:
try:
username = getattr(cls, 'foreman_admin_username')
except AttributeError:
username = conf.properties['foreman.admin.username']
if password is None:
try:
password = getattr(cls, 'foreman_admin_password')
except AttributeError:
password = conf.properties['foreman.admin.password']
return (username, password)
@classmethod
def execute(cls, command, user=None, password=None, output_format=None,
timeout=None, ignore_stderr=None, return_raw_response=None):
"""Executes the cli ``command`` on the server via ssh"""
user, password = cls._get_username_password(user, password)
# add time to measure hammer performance
perf_test = conf.properties.get('performance.test.foreman.perf', '0')
cmd = u'LANG={0} {1} hammer -v -u {2} -p {3} {4} {5}'.format(
conf.properties['main.locale'],
u'time -p' if perf_test == '1' else '',
user,
password,
u'--output={0}'.format(output_format) if output_format else u'',
command,
)
response = ssh.command(
cmd.encode('utf-8'),
output_format=output_format,
timeout=timeout,
)
if return_raw_response:
return response
else:
return cls._handle_response(
response,
ignore_stderr=ignore_stderr,
)
@classmethod
def exists(cls, options=None, search=None):
"""Search for an entity using the query ``search[0]="search[1]"``
Will be used the ``list`` command with the ``--search`` option to do
the search.
If ``options`` argument already have a search key, then the ``search``
argument will not be evaluated. Which allows different search query.
"""
if options is None:
options = {}
if search is not None and u'search' not in options:
options.update({u'search': u'{0}=\\"{1}\\"'.format(
search[0], search[1])})
result = cls.list(options)
if result:
result = result[0]
return result
@classmethod
def info(cls, options=None, output_format=None):
"""Reads the entity information."""
cls.command_sub = 'info'
if options is None:
options = {}
if cls.command_requires_org and 'organization-id' not in options:
raise CLIError(
'organization-id option is required for {0}.info'
.format(cls.__name__)
)
result = cls.execute(
command=cls._construct_command(options),
output_format=output_format
)
if output_format != 'json':
result = hammer.parse_info(result)
return result
@classmethod
def list(cls, options=None, per_page=True):
"""
List information.
@param options: ID (sometimes name works as well) to retrieve info.
"""
cls.command_sub = 'list'
if options is None:
options = {}
if 'per-page' not in options and per_page:
options[u'per-page'] = 10000
if cls.command_requires_org and 'organization-id' not in options:
raise CLIError(
'organization-id option is required for {0}.list'
.format(cls.__name__)
)
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def puppetclasses(cls, options=None):
"""
Lists all puppet classes.
"""
cls.command_sub = 'puppet-classes'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def remove_operating_system(cls, options=None):
"""
Removes OS from record.
"""
cls.command_sub = 'remove-operatingsystem'
result = cls.execute(cls._construct_command(options))
return result
@classmethod
def sc_params(cls, options=None):
"""
Lists all smart class parameters.
"""
cls.command_sub = 'sc-params'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def set_parameter(cls, options=None):
"""
Creates or updates parameter for a record.
"""
cls.command_sub = 'set-parameter'
result = cls.execute(cls._construct_command(options))
return result
@classmethod
def update(cls, options=None):
"""
Updates existing record.
"""
cls.command_sub = 'update'
result = cls.execute(
cls._construct_command(options), output_format='csv')
return result
@classmethod
def with_user(cls, username=None, password=None):
"""Context Manager for credentials"""
if username is None:
username = conf.properties['foreman.admin.username']
if password is None:
password = conf.properties['foreman.admin.password']
class Wrapper(cls):
"""Wrapper class which defines the foreman admin username and
password to be used when executing any cli command.
"""
foreman_admin_username = username
foreman_admin_password = password
return Wrapper
@classmethod
def _construct_command(cls, options=None):
"""
Build a hammer cli command based on the options passed
"""
tail = u''
if options is None:
options = {}
for key, val in options.items():
if val is None:
continue
if val is True:
tail += u' --{0}'.format(key)
elif val is not False:
if isinstance(val, list):
val = ','.join(str(el) for el in val)
tail += u' --{0}="{1}"'.format(key, val)
cmd = u'{0} {1} {2}'.format(
cls.command_base,
cls.command_sub,
tail.strip()
)
return cmd
| abalakh/robottelo | robottelo/cli/base.py | Python | gpl-3.0 | 14,527 |
normIncludes = [
{"fieldName": "field1", "includes": "GOOD,VALUE", "excludes": "BAD,STUFF", "begins": "", "ends": "", "replace": "goodvalue"},
{"fieldName": "field1", "includes": "", "excludes": "", "begins": "ABC", "ends": "", "replace": "goodvalue"},
{"fieldName": "field1", "includes": "", "excludes": "", "begins": "", "ends": "XYZ", "replace": "goodvalue"},
{"fieldName": "field100"}
]
| rh-marketingops/dwm | dwm/test/test_normIncludes.py | Python | gpl-3.0 | 407 |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 31 16:04:18 2017
@author: adelpret
"""
import pinocchio as se3
import numpy as np
from pinocchio import RobotWrapper
from conversion_utils import config_sot_to_urdf, joints_sot_to_urdf, velocity_sot_to_urdf
from dynamic_graph.sot.torque_control.inverse_dynamics_balance_controller import InverseDynamicsBalanceController
from dynamic_graph.sot.torque_control.create_entities_utils import create_ctrl_manager
import dynamic_graph.sot.torque_control.hrp2.balance_ctrl_sim_conf as balance_ctrl_conf
import dynamic_graph.sot.torque_control.hrp2.control_manager_sim_conf as control_manager_conf
from dynamic_graph.sot.torque_control.tests.robot_data_test import initRobotData
np.set_printoptions(precision=3, suppress=True, linewidth=100);
def create_balance_controller(dt, q, conf, robot_name='robot'):
ctrl = InverseDynamicsBalanceController("invDynBalCtrl");
ctrl.q.value = tuple(q);
ctrl.v.value = (NJ+6)*(0.0,);
ctrl.wrench_right_foot.value = 6*(0.0,);
ctrl.wrench_left_foot.value = 6*(0.0,);
ctrl.posture_ref_pos.value = tuple(q[6:]);
ctrl.posture_ref_vel.value = NJ*(0.0,);
ctrl.posture_ref_acc.value = NJ*(0.0,);
ctrl.com_ref_pos.value = (0., 0., 0.8);
ctrl.com_ref_vel.value = 3*(0.0,);
ctrl.com_ref_acc.value = 3*(0.0,);
# ctrl.rotor_inertias.value = np.array(conf.ROTOR_INERTIAS);
# ctrl.gear_ratios.value = conf.GEAR_RATIOS;
ctrl.rotor_inertias.value = tuple([g*g*r for (g,r) in zip(conf.GEAR_RATIOS, conf.ROTOR_INERTIAS)])
ctrl.gear_ratios.value = NJ*(1.0,);
ctrl.contact_normal.value = conf.FOOT_CONTACT_NORMAL;
ctrl.contact_points.value = conf.RIGHT_FOOT_CONTACT_POINTS;
ctrl.f_min.value = conf.fMin;
ctrl.f_max_right_foot.value = conf.fMax;
ctrl.f_max_left_foot.value = conf.fMax;
ctrl.mu.value = conf.mu[0];
ctrl.weight_contact_forces.value = (1e2, 1e2, 1e0, 1e3, 1e3, 1e3);
ctrl.kp_com.value = 3*(conf.kp_com,);
ctrl.kd_com.value = 3*(conf.kd_com,);
ctrl.kp_constraints.value = 6*(conf.kp_constr,);
ctrl.kd_constraints.value = 6*(conf.kd_constr,);
ctrl.kp_feet.value = 6*(conf.kp_feet,);
ctrl.kd_feet.value = 6*(conf.kd_feet,);
ctrl.kp_posture.value = conf.kp_posture;
ctrl.kd_posture.value = conf.kd_posture;
ctrl.kp_pos.value = conf.kp_pos;
ctrl.kd_pos.value = conf.kd_pos;
ctrl.w_com.value = conf.w_com;
ctrl.w_feet.value = conf.w_feet;
ctrl.w_forces.value = conf.w_forces;
ctrl.w_posture.value = conf.w_posture;
ctrl.w_base_orientation.value = conf.w_base_orientation;
ctrl.w_torques.value = conf.w_torques;
ctrl.active_joints.value = NJ*(1,);
ctrl.init(dt, robot_name);
return ctrl;
print "*** UNIT TEST FOR INVERSE-DYNAMICS-BALANCE-CONTROLLER (IDBC) ***"
print "This test computes the torques using the IDBC and compares them with"
print "the torques computed using the desired joint accelerations and contact"
print "wrenches computed by the IDBC. The two values should be identical."
print "Some small differences are expected due to the precision loss when"
print "Passing the parameters from python to c++."
print "However, none of the following values should be larger than 1e-3.\n"
N_TESTS = 100
dt = 0.001;
NJ = initRobotData.nbJoints
# robot configuration
q_sot = np.array([-0.0027421149619457344, -0.0013842807952574399, 0.6421082804660067,
-0.0005693871512031474, -0.0013094048521806974, 0.0028568508070167,
-0.0006369040657361668, 0.002710094953239396, -0.48241992906618536, 0.9224570746372157, -0.43872624301275104, -0.0021586727954009096,
-0.0023395862060549863, 0.0031045906573987617, -0.48278188636903313, 0.9218508861779927, -0.4380058166724791, -0.0025558837738616047,
-0.012985322450541008, 0.04430420221275542, 0.37027327677517635, 1.4795064165303056,
0.20855551221055582, -0.13188842278441873, 0.005487207370709895, -0.2586657542648506, 2.6374918629921953, -0.004223605878088189, 0.17118034021053144, 0.24171737354070008, 0.11594430024547904, -0.05264225067057105, -0.4691871937149223, 0.0031522040623960016, 0.011836097472447007, 0.18425595002313025]);
ctrl_manager = create_ctrl_manager(control_manager_conf, dt);
ctrl = create_balance_controller(dt, q_sot, balance_ctrl_conf);
robot = RobotWrapper(initRobotData.testRobotPath, [], se3.JointModelFreeFlyer())
index_rf = robot.index('RLEG_JOINT5');
index_lf = robot.index('LLEG_JOINT5');
Md = np.matrix(np.zeros((NJ+6,NJ+6)));
gr = joints_sot_to_urdf(balance_ctrl_conf.GEAR_RATIOS);
ri = joints_sot_to_urdf(balance_ctrl_conf.ROTOR_INERTIAS);
for i in range(NJ):
Md[6+i,6+i] = ri[i] * gr[i] * gr[i];
for i in range(N_TESTS):
q_sot += 0.001*np.random.random(NJ+6);
v_sot = np.random.random(NJ+6);
q_pin = np.matrix(config_sot_to_urdf(q_sot));
v_pin = np.matrix(velocity_sot_to_urdf(v_sot));
ctrl.q.value = tuple(q_sot);
ctrl.v.value = tuple(v_sot);
ctrl.tau_des.recompute(i);
tau_ctrl = joints_sot_to_urdf(np.array(ctrl.tau_des.value));
ctrl.dv_des.recompute(i);
dv = velocity_sot_to_urdf(np.array(ctrl.dv_des.value));
M = Md + robot.mass(q_pin);
h = robot.bias(q_pin, v_pin);
ctrl.f_des_right_foot.recompute(i);
ctrl.f_des_left_foot.recompute(i);
f_rf = np.matrix(ctrl.f_des_right_foot.value).T;
f_lf = np.matrix(ctrl.f_des_left_foot.value).T;
J_rf = robot.jacobian(q_pin, index_rf);
J_lf = robot.jacobian(q_pin, index_lf);
tau_pin = M*np.matrix(dv).T + h - J_rf.T * f_rf - J_lf.T * f_lf;
# ctrl.M.recompute(i);
# M_ctrl = np.array(ctrl.M.value);
print "norm(tau_ctrl-tau_pin) = %.4f"% np.linalg.norm(tau_ctrl - tau_pin[6:,0].T);
print "norm(tau_pin[:6]) = %.4f"% np.linalg.norm(tau_pin[:6]);
# print "q_pin:\n", q_pin;
# print "tau_pin:\n", tau_pin[6:,0].T, "\n";
# print "tau ctrl:\n", tau_ctrl.T, "\n";
# print "dv = ", np.linalg.norm(dv);
# print "f_rf:", f_rf.T, "\n";
# print "f_lf:", f_lf.T, "\n";
# print "h:", h.T, "\n";
# M_err = M-M_ctrl
# print "M-M_ctrl = ", M_err.diagonal(), "\n"
# for j in range(NJ+6):
# print M_err[j,:];
| proyan/sot-torque-control | unitTesting/unit_test_inverse_dynamics_balance_controller.py | Python | gpl-3.0 | 6,246 |
"""
YieldFrom astroid node
This node represents the Python "yield from" statement, which functions
similarly to the "yield" statement except that the generator can delegate
some generating work to another generator.
Attributes:
- value (GeneratorExp)
- The generator that this YieldFrom is delegating work to.
Example:
- value -> Call(range, Name('g', Load()))
"""
def fun(g):
yield from range(g)
| shweta97/pyta | nodes/YieldFrom.py | Python | gpl-3.0 | 423 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GeoNamesMatchingLogMatchedPlaces.remark'
db.add_column('united_geonames_geonamesmatchinglogmatchedplaces', 'remark', self.gf('django.db.models.fields.TextField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'GeoNamesMatchingLogMatchedPlaces.remark'
db.delete_column('united_geonames_geonamesmatchinglogmatchedplaces', 'remark')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 25, 14, 53, 19, 34425)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 25, 14, 53, 19, 34316)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'united_geonames.geonamesmatchinglogmatch': {
'Meta': {'ordering': "['-matching_index']", 'object_name': 'GeoNamesMatchingLogMatch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'display_for_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matching_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '233', 'null': 'True', 'blank': 'True'}),
'number_of_alternatives': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
},
'united_geonames.geonamesmatchinglogmatchedplaces': {
'Meta': {'object_name': 'GeoNamesMatchingLogMatchedPlaces'},
'best_match': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'geographical_distance': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matchinglogmatch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'matched'", 'null': 'True', 'to': "orm['united_geonames.GeoNamesMatchingLogMatch']"}),
'ngram_distance': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'percentage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'remark': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'united_geoname': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['united_geonames.UnitedGeoName']", 'null': 'True', 'blank': 'True'})
},
'united_geonames.unitedgeoname': {
'Meta': {'object_name': 'UnitedGeoName'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main_name': ('django.db.models.fields.CharField', [], {'max_length': '300'})
},
'united_geonames.unitedgeonamesynonim': {
'Meta': {'object_name': 'UnitedGeoNameSynonim'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'coordinates': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'spatial_index': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'subregion': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'synonim_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'synonim_content_type_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'synonim_name': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'synonim_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'united_geoname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'geonames'", 'null': 'True', 'to': "orm['united_geonames.UnitedGeoName']"})
},
'united_geonames.usergeoname': {
'Meta': {'object_name': 'UserGeoName'},
'coordinates': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'spatial_index': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subregion': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'united_geonames.userproject': {
'Meta': {'object_name': 'UserProject'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['united_geonames']
| justinasjaronis/hpn | united_geonames/migrations/0015_auto__add_field_geonamesmatchinglogmatchedplaces_remark.py | Python | gpl-3.0 | 9,334 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.utils.translation import ugettext_noop as _
from geonode.notifications_helper import NotificationsAppConfigBase
class PeopleAppConfig(NotificationsAppConfigBase):
name = 'geonode.people'
NOTIFICATIONS = (("user_follow", _("User following you"), _("Another user has started following you"),),
("account_approve", _("User requested access"),
_("A new user has requested access to the site"),),
("account_active", _("Account activated"),
_("This account is now active and can log in the site"),),
)
def ready(self):
super(PeopleAppConfig, self).ready()
default_app_config = 'geonode.people.PeopleAppConfig'
| kartoza/geonode | geonode/people/__init__.py | Python | gpl-3.0 | 1,584 |
#!/usr/bin/python -tt
# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.
#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################
import sys
import math
import time
from bzrc import BZRC, Command
class Agent(object):
"""Class handles all command and control logic for a teams tanks."""
def __init__(self, bzrc):
self.bzrc = bzrc
self.constants = self.bzrc.get_constants()
self.commands = []
def tick(self, time_diff):
"""Some time has passed; decide what to do next."""
mytanks, othertanks, flags, shots, obstacles = self.bzrc.get_lots_o_stuff()
self.mytanks = mytanks
self.othertanks = othertanks
self.flags = flags
self.shots = shots
self.enemies = [tank for tank in othertanks if tank.color !=
self.constants['team']]
self.commands = []
for tank in mytanks:
self.attack_enemies(tank)
results = self.bzrc.do_commands(self.commands)
def attack_enemies(self, tank):
"""Find the closest enemy and chase it, shooting as you go."""
best_enemy = None
best_dist = 2 * float(self.constants['worldsize'])
for enemy in self.enemies:
if enemy.status != 'alive':
continue
dist = math.sqrt((enemy.x - tank.x)**2 + (enemy.y - tank.y)**2)
if dist < best_dist:
best_dist = dist
best_enemy = enemy
if best_enemy is None:
command = Command(tank.index, 0, 0, False)
self.commands.append(command)
else:
self.move_to_position(tank, best_enemy.x, best_enemy.y)
def move_to_position(self, tank, target_x, target_y):
"""Set command to move to given coordinates."""
target_angle = math.atan2(target_y - tank.y,
target_x - tank.x)
relative_angle = self.normalize_angle(target_angle - tank.angle)
command = Command(tank.index, 1, 2 * relative_angle, True)
self.commands.append(command)
def normalize_angle(self, angle):
"""Make any angle be between +/- pi."""
angle -= 2 * math.pi * int (angle / (2 * math.pi))
if angle <= -math.pi:
angle += 2 * math.pi
elif angle > math.pi:
angle -= 2 * math.pi
return angle
def main():
# Process CLI arguments.
try:
execname, host, port = sys.argv
except ValueError:
execname = sys.argv[0]
print >>sys.stderr, '%s: incorrect number of arguments' % execname
print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
sys.exit(-1)
# Connect.
#bzrc = BZRC(host, int(port), debug=True)
bzrc = BZRC(host, int(port))
agent = Agent(bzrc)
prev_time = time.time()
# Run the agent
try:
while True:
time_diff = time.time() - prev_time
agent.tick(time_diff)
except KeyboardInterrupt:
print "Exiting due to keyboard interrupt."
bzrc.close()
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
| bweaver2/bzrFlag | bzagents/agent0.py | Python | gpl-3.0 | 3,861 |
#!/usr/bin/python
#
# Problem: Endless Knight
# Language: Python
# Author: KirarinSnow
# Usage: python thisfile.py <input.in >output.out
# Comments: OK for large, but may time out on small.
from itertools import *
MOD = 10007
# Precompute factorial table mod MOD
fact = [1] * MOD
for i in xrange(1, MOD):
fact[i] = (fact[i-1] * i)
# n choose k -- using Lucas's theorem
def choose(n, k):
if k > n:
return 0
elif n < MOD:
return (fact[n]/fact[n-k]/fact[k])%MOD
else:
prod = 1
while n > 0:
prod *= choose(n%MOD, k%MOD)
prod %= MOD
n /= MOD
k /= MOD
return prod
def compute():
h, w, r = map(int, raw_input().split())
rocks = [map(int, raw_input().split()) for i in range(r)]
if (h+w-2)%3 != 0:
return 0
# normalize rock coordinates
h, w = h-1-(h+w-2)/3, w-1-(h+w-2)/3
for i in range(r):
row, col = rocks[i]
if (row+col-2)%3 != 0:
rocks[i] = None
else:
rocks[i] = [row-1-(row+col-2)/3, col-1-(row+col-2)/3]
if rocks[i][0] < 0 or rocks[i][0] > h:
rocks[i] = None
elif rocks[i][1] < 0 or rocks[i][1] > w:
rocks[i] = None
total = 0
for num in range(r+1):
for perm in permutations(range(r), num):
# verify increasing property of permutation
inc = True
for i in range(num):
if rocks[perm[i]] == None:
inc = False
break
if i > 0:
if rocks[perm[i]][0] < rocks[perm[i-1]][0]:
inc = False
break
if rocks[perm[i]][1] < rocks[perm[i-1]][1]:
inc = False
break
if inc:
points = [[0,0]] + [rocks[j] for j in perm] + [[h,w]]
# number of paths going through all points
prod = 1
for j in range(1, len(points)):
dh = points[j][0] - points[j-1][0]
dw = points[j][1] - points[j-1][1]
prod *= choose(dh+dw, dw)
prod %= MOD
# inclusion-exclusion
total += (-1)**num * prod
total %= MOD
return total
for i in range(input()):
print "Case #%d: %d" % (i+1, compute())
| KirarinSnow/Google-Code-Jam | Round 3 2008/D1.py | Python | gpl-3.0 | 2,492 |
"""
This module provides functions that generate commonly used Hamiltonian terms.
"""
__all__ = [
"Annihilator",
"Creator",
"CPFactory",
"HoppingFactory",
"PairingFactory",
"HubbardFactory",
"CoulombFactory",
"HeisenbergFactory",
"IsingFactory",
"TwoSpinTermFactory",
]
from HamiltonianPy.quantumoperator.constant import ANNIHILATION, CREATION, \
SPIN_DOWN, SPIN_UP
from HamiltonianPy.quantumoperator.particlesystem import AoC, ParticleTerm
from HamiltonianPy.quantumoperator.spinsystem import *
def Creator(site, spin=0, orbit=0):
"""
Generate creation operator: $c_i^{\\dagger}$.
Parameters
----------
site : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
The `site` parameter should be 1D array with length 1,2 or 3.
spin : int, optional
The spin index of the single-particle state.
Default: 0.
orbit : int, optional
The orbit index of the single-particle state.
Default: 0.
Returns
-------
operator : AoC
The corresponding creation operator.
Examples
--------
>>> from HamiltonianPy.quantumoperator import Creator
>>> Creator((0, 0), spin=1)
AoC(otype=CREATION, site=(0, 0), spin=1, orbit=0)
"""
return AoC(CREATION, site=site, spin=spin, orbit=orbit)
def Annihilator(site, spin=0, orbit=0):
"""
Generate annihilation operator: $c_i$.
Parameters
----------
site : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
The `site` parameter should be 1D array with length 1,2 or 3.
spin : int, optional
The spin index of the single-particle state.
Default: 0.
orbit : int, optional
The orbit index of the single-particle state.
Default: 0.
Returns
-------
operator : AoC
The corresponding annihilation operator.
Examples
--------
>>> from HamiltonianPy.quantumoperator import Annihilator
>>> Annihilator((0, 0), spin=0)
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
"""
return AoC(ANNIHILATION, site=site, spin=spin, orbit=orbit)
def CPFactory(site, *, spin=0, orbit=0, coeff=1.0):
"""
Generate chemical potential term: '$\\mu c_i^{\\dagger} c_i$'.
Parameters
----------
site : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
The `site` parameter should be 1D array with length 1,2 or 3.
spin : int, optional, keyword-only
The spin index of the single-particle state.
Default: 0.
orbit : int, optional, keyword-only
The orbit index of the single-particle state.
Default: 0.
coeff : int or float, optional, keyword-only
The coefficient of this term.
Default: 1.0.
Returns
-------
term : ParticleTerm
The corresponding chemical potential term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import CPFactory
>>> term = CPFactory((0, 0))
>>> print(term)
The coefficient of this term: 1.0
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
"""
c = AoC(CREATION, site=site, spin=spin, orbit=orbit)
a = AoC(ANNIHILATION, site=site, spin=spin, orbit=orbit)
return ParticleTerm((c, a), coeff=coeff, classification="number")
def HoppingFactory(
site0, site1, *, spin0=0, spin1=None, orbit0=0, orbit1=None, coeff=1.0
):
"""
Generate hopping term: '$t c_i^{\\dagger} c_j$'.
These parameters suffixed with '0' are for the creation operator and '1'
for annihilation operator.
Parameters
----------
site0, site1 : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
`site0` and `site1` should be 1D array with length 1, 2 or 3.
spin0, spin1 : int, optional, keyword-only
The spin index of the single-particle state.
The default value for `spin0` is 0;
The default value for `spin1` is None, which implies that `spin1`
takes the same value as `spin0`.
orbit0, orbit1 : int, optional, keyword-only
The orbit index of the single-particle state.
The default value for `orbit0` is 0;
The default value for `orbit1` is None, which implies that `orbit1`
takes the same value as `orbit0`.
coeff : int, float or complex, optional, keyword-only
The coefficient of this term.
Default: 1.0.
Returns
-------
term : ParticleTerm
The corresponding hopping term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import HoppingFactory
>>> term = HoppingFactory(site0=(0, 0), site1=(1, 1), spin0=1)
>>> print(term)
The coefficient of this term: 1.0
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=1, orbit=0)
AoC(otype=ANNIHILATION, site=(1, 1), spin=1, orbit=0)
>>> term = HoppingFactory(site0=(0, 0), site1=(1, 1), spin0=0, spin1=1)
>>> print(term)
The coefficient of this term: 1.0
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(1, 1), spin=1, orbit=0)
"""
if spin1 is None:
spin1 = spin0
if orbit1 is None:
orbit1 = orbit0
c = AoC(CREATION, site=site0, spin=spin0, orbit=orbit0)
a = AoC(ANNIHILATION, site=site1, spin=spin1, orbit=orbit1)
classification = "hopping" if c.state != a.state else "number"
return ParticleTerm((c, a), coeff=coeff, classification=classification)
def PairingFactory(
site0, site1, *, spin0=0, spin1=0, orbit0=0, orbit1=0,
coeff=1.0, which="h"
):
"""
Generate pairing term: '$p c_i^{\\dagger} c_j^{\\dagger}$' or '$p c_i c_j$'.
These parameters suffixed with '0' are for the 1st operator and '1' for
2nd operator.
Parameters
----------
site0, site1 : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
`site0` and `site1` should be 1D array with length 1, 2 or 3.
spin0, spin1 : int, optional, keyword-only
The spin index of the single-particle state.
Default: 0.
orbit0, orbit1 : int, optional, keyword-only
The orbit index of the single-particle state.
Default: 0.
coeff : int, float or complex, optional, keyword-only
The coefficient of this term.
Default: 1.0.
which : str, optional, keyword-only
Determine whether to generate a particle- or hole-pairing term.
Valid values:
["h" | "hole"] for hole-pairing;
["p" | "particle"] for particle-pairing.
Default: "h".
Returns
-------
term : ParticleTerm
The corresponding pairing term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import PairingFactory
>>> term = PairingFactory((0, 0), (1, 1), spin0=0, spin1=1, which="h")
>>> print(term)
The coefficient of this term: 1.0
The component operators:
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(1, 1), spin=1, orbit=0)
>>> term = PairingFactory((0, 0), (1, 1), spin0=0, spin1=1, which="p")
>>> print(term)
The coefficient of this term: 1.0
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=CREATION, site=(1, 1), spin=1, orbit=0)
"""
assert which in ("h", "hole", "p", "particle")
otype = ANNIHILATION if which in ("h", "hole") else CREATION
aoc0 = AoC(otype, site=site0, spin=spin0, orbit=orbit0)
aoc1 = AoC(otype, site=site1, spin=spin1, orbit=orbit1)
return ParticleTerm((aoc0, aoc1), coeff=coeff)
def HubbardFactory(site, *, orbit=0, coeff=1.0):
"""
Generate Hubbard term: '$U n_{i\\uparrow} n_{i\\downarrow}$'.
This function is valid only for spin-1/2 system.
Parameters
----------
site : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
`site` should be 1D array with length 1,2 or 3.
orbit : int, optional, keyword-only
The orbit index of the single-particle state.
Default: 0.
coeff : int or float, optional, keyword-only
The coefficient of this term.
Default: 1.0.
Returns
-------
term : ParticleTerm
The corresponding Hubbard term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import HubbardFactory
>>> term = HubbardFactory(site=(0, 0))
>>> print(term)
The coefficient of this term: 1.0
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=1, orbit=0)
AoC(otype=ANNIHILATION, site=(0, 0), spin=1, orbit=0)
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
"""
c_up = AoC(CREATION, site=site, spin=SPIN_UP, orbit=orbit)
c_down = AoC(CREATION, site=site, spin=SPIN_DOWN, orbit=orbit)
a_up = AoC(ANNIHILATION, site=site, spin=SPIN_UP, orbit=orbit)
a_down = AoC(ANNIHILATION, site=site, spin=SPIN_DOWN, orbit=orbit)
return ParticleTerm(
(c_up, a_up, c_down, a_down), coeff=coeff, classification="Coulomb"
)
def CoulombFactory(
site0, site1, *, spin0=0, spin1=0, orbit0=0, orbit1=0, coeff=1.0
):
"""
Generate Coulomb interaction term: '$U n_i n_j$'.
These parameters suffixed with '0' are for the 1st operator and '1' for
2nd operator.
Parameters
----------
site0, site1 : list, tuple or 1D np.ndarray
The coordinates of the localized single-particle state.
`site0` and `site1` should be 1D array with length 1, 2 or 3.
spin0, spin1 : int, optional, keyword-only
The spin index of the single-particle state.
Default: 0.
orbit0, orbit1 : int, optional, keyword-only
The orbit index of the single-particle state.
Default: 0.
coeff : int or float, optional, keyword-only
The coefficient of this term.
Default: 1.0.
Returns
-------
term : ParticleTerm
The corresponding Coulomb interaction term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import CoulombFactory
>>> term = CoulombFactory((0, 0), (1, 1), spin0=0, spin1=1)
>>> print(term)
The coefficient of this term: 1.0
The component operators:
AoC(otype=CREATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=ANNIHILATION, site=(0, 0), spin=0, orbit=0)
AoC(otype=CREATION, site=(1, 1), spin=1, orbit=0)
AoC(otype=ANNIHILATION, site=(1, 1), spin=1, orbit=0)
"""
c0 = AoC(CREATION, site=site0, spin=spin0, orbit=orbit0)
a0 = AoC(ANNIHILATION, site=site0, spin=spin0, orbit=orbit0)
c1 = AoC(CREATION, site=site1, spin=spin1, orbit=orbit1)
a1 = AoC(ANNIHILATION, site=site1, spin=spin1, orbit=orbit1)
return ParticleTerm((c0, a0, c1, a1), coeff=coeff, classification="Coulomb")
def HeisenbergFactory(site0, site1, *, coeff=1.0):
"""
Generate Heisenberg interaction term: '$J S_i S_j$'.
Parameters
----------
site0, site1 : list, tuple or 1D np.ndarray
The coordinates of the lattice site on which the spin operator is
defined. `site0` and `site1` should be 1D array with length 1,
2 or 3. `site0` for the first spin operator and `site1` for the
second spin operator.
coeff : int or float, optional, keyword-only
The coefficient of this term.
Default: 1.0.
Returns
-------
terms : 3-tuple
terms[0] is the '$J S_i^z S_j^z$' term;
terms[1] is the '$J/2 S_i^+ S_j^-$' term;
terms[2] is the '$J/2 S_i^- S_j^+$' term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import HeisenbergFactory
>>> term = HeisenbergFactory((0, 0), (1, 1))
>>> print(term[0])
The coefficient of this term: 1.0
The component operators:
SpinOperator(otype="z", site=(0, 0))
SpinOperator(otype="z", site=(1, 1))
>>> print(term[1])
The coefficient of this term: 0.5
The component operators:
SpinOperator(otype="p", site=(0, 0))
SpinOperator(otype="m", site=(1, 1))
>>> print(term[2])
The coefficient of this term: 0.5
The component operators:
SpinOperator(otype="m", site=(0, 0))
SpinOperator(otype="p", site=(1, 1))
"""
sz0 = SpinOperator(otype="z", site=site0)
sp0 = SpinOperator(otype="p", site=site0)
sm0 = SpinOperator(otype="m", site=site0)
sz1 = SpinOperator(otype="z", site=site1)
sp1 = SpinOperator(otype="p", site=site1)
sm1 = SpinOperator(otype="m", site=site1)
return (
SpinInteraction((sz0, sz1), coeff=coeff),
SpinInteraction((sp0, sm1), coeff=coeff/2),
SpinInteraction((sm0, sp1), coeff=coeff/2),
)
def IsingFactory(site0, site1, alpha, *, coeff=1.0):
"""
Generate Ising type spin interaction term:
'$J S_i^{\\alpha} S_j^{\\alpha}$'
Parameters
----------
site0, site1 : list, tuple or 1D np.ndarray
The coordinates of the lattice site on which the spin operator is
defined. `site0` and `site1` should be 1D array with length 1,
2 or 3. `site0` for the first spin operator and `site1` for the
second spin operator.
alpha : {"x", "y" or "z"}
Which type of spin operator is involved.
coeff : int or float, optional, keyword-only
The coefficient of this term.
Default: 1.0.
Returns
-------
term: SpinInteraction
The corresponding spin interaction term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import IsingFactory
>>> term = IsingFactory((0, 0), (1, 1), "x")
>>> print(term)
The coefficient of this term: 1.0
The component operators:
SpinOperator(otype="x", site=(0, 0))
SpinOperator(otype="x", site=(1, 1))
"""
assert alpha in ("x", "y", "z")
s0_alpha = SpinOperator(otype=alpha, site=site0)
s1_alpha = SpinOperator(otype=alpha, site=site1)
return SpinInteraction((s0_alpha, s1_alpha), coeff=coeff)
def TwoSpinTermFactory(site0, site1, alpha0, alpha1, *, coeff=1.0):
"""
Generate general two spin interaction term:
'$J S_i^{\\alpha} S_j^{\\beta}$'
Parameters
----------
site0, site1 : list, tuple or 1D np.ndarray
The coordinates of the lattice site on which the spin operator is
defined. `site0` and `site1` should be 1D array with length 1,
2 or 3. `site0` for the first spin operator and `site1` for the
second spin operator.
alpha0, alpha1 : {"x", "y" or "z"}
Which type of spin operator is involved.
`alpha0` for the first and `alpha1` for the second spin operator.
coeff : int or float, optional, keyword-only
The coefficient of this term.
Default: 1.0.
Returns
-------
term: SpinInteraction
The corresponding spin interaction term.
Examples
--------
>>> from HamiltonianPy.quantumoperator import TwoSpinTermFactory
>>> term = TwoSpinTermFactory((0, 0), (1, 1), alpha0="x", alpha1="y")
>>> print(term)
The coefficient of this term: 1.0
The component operators:
SpinOperator(otype="x", site=(0, 0))
SpinOperator(otype="y", site=(1, 1))
"""
assert alpha0 in ("x", "y", "z")
assert alpha1 in ("x", "y", "z")
s0_alpha = SpinOperator(otype=alpha0, site=site0)
s1_alpha = SpinOperator(otype=alpha1, site=site1)
return SpinInteraction((s0_alpha, s1_alpha), coeff=coeff)
| wangshiphys/HamiltonianPy | HamiltonianPy/quantumoperator/factory.py | Python | gpl-3.0 | 15,843 |
"""
Copyright 2016 Puffin Software. All rights reserved.
"""
from com.puffinware.pistat.models import User, Location, Category, Thermostat, Sensor, Reading
from com.puffinware.pistat import DB
from logging import getLogger
log = getLogger(__name__)
def setup_db(app):
DB.create_tables([User, Location, Category, Thermostat, Sensor, Reading], safe=True)
# This hook ensures that a connection is opened to handle any queries
# generated by the request.
@app.before_request
def _db_connect():
log.debug('DB Connect')
DB.connect()
# This hook ensures that the connection is closed when we've finished
# processing the request.
@app.teardown_request
def _db_close(exc):
if not DB.is_closed():
log.debug('DB Close')
DB.close() | PuffinWare/pistat | com/puffinware/pistat/db.py | Python | gpl-3.0 | 767 |
x = int(input())
y = int(input())
print('In this test case x =', x, 'and y =', y)
if x >= y:
print('(The maximum is x)')
theMax = x
else:
print('(The maximum is y)')
theMax = y
print('The maximum is', theMax)
| cemc/cscircles-wp-content | lesson_files/lesson9/if.py | Python | gpl-3.0 | 225 |
from django.conf.urls import url
from django.contrib.auth.views import login, \
logout, \
logout_then_login, \
password_change, \
password_change_done, \
password_reset, \
password_reset_done, \
password_reset_confirm, \
password_reset_complete
from . import views
urlpatterns = [
url(r'^$', views.dashboard, name='dashboard'),
# login / logout urls
url(r'^login/$', view=login, name='login'),
url(r'^logout/$', view=logout, name='logout'),
url(r'^logout-then-login/$', view=logout_then_login, name='logout_then_login'),
# change password urls
url(r'^password-change/$', view=password_change, name='password_change'),
url(r'^password-change/done/$', view=password_change_done, name='password_change_done'),
# restore password urls
url(r'^password-reset/$', view=password_reset, name='password_reset'),
url(r'^password-reset/done/$', view=password_reset_done, name='password_reset_done'),
url(r'^password-reset/confirm/(?P<uidb64>[-\w]+)/(?P<token>[-\w]+)/$', view=password_reset_confirm, name='password_reset_confirm'),
url(r'^password-reset/complete/$', view=password_reset_complete, name='password_reset_complete'),
] | t104801/webapp | security/urls.py | Python | gpl-3.0 | 1,479 |
import pygame
from Explosion import Explosion
class Bullet(object):
PLAYER, ENEMY = 1, 0
def __init__(self, manager, parent, init_pos, direction, speed=3):
self.manager = manager
self.parent = parent
self.image = pygame.image.load("res/tanks/bullet.png")
self.explosion = pygame.image.load("res/explosions/bullet_explosion.png")
self.rect = self.calculate_init_point(direction, init_pos)
self.speed = self.calculate_speed(direction, speed)
def calculate_speed(self, direction, speed):
if direction == 0: # Up
return (0, -speed)
if direction == 1: # Down
self.image = pygame.transform.rotate(self.image, 180)
return (0, speed)
if direction == 2: # Left
self.image = pygame.transform.rotate(self.image, 90)
return (-speed, 0)
if direction == 3: # Right
self.image = pygame.transform.rotate(self.image, -90)
return (speed, 0)
def calculate_init_point(self, direction, init_pos):
rect = self.image.get_rect()
posX = init_pos[0]
posY = init_pos[1]
if direction == 0:
rect.x = posX + 12
rect.y = posY - 14
if direction == 1:
rect.x = posX + 12
rect.y = posY + 32
if direction == 2:
rect.x = posX - 14
rect.y = posY + 12
if direction == 3:
rect.x = posX + 32
rect.y = posY + 12
return rect
def update(self, blocks):
posX = self.speed[0]
posY = self.speed[1]
self.rect.x += posX
self.rect.y += posY
# Si nos vamos a salir del mundo, explotamos
if self.rect.x < 0:
self.rect.x = 0
self.explode()
if self.rect.x > 632:
self.rect.x = 632
self.explode()
if self.rect.y < 0:
self.rect.y = 0
self.explode()
if self.rect.y > 568:
self.rect.y = 568
self.explode()
crashed = False
# Check if we crashed with another block
for block in blocks:
# We can't crash with ourselves... can we?
if block == self:
pass
# If we do crash, we tell the manager to destroy said block
elif self.rect.colliderect(block):
# Right after we check if we can destroy said block
block_name = type(block).__name__
if block_name in ["Block", "Heart", "Bullet"]:
self.impact_side(block)
if self.manager.destroy_element(block): # Block tells us if it destroyed
crashed = True
else: # Else, we explode
self.explode()
elif block_name == "Enemy" and self.parent: # Player bullet against enemy
self.impact_side(block)
# If enemy tells us it destroyed, it's a kill
if self.manager.destroy_element(block):
self.manager.increment_kills()
crashed = True
else: # Else, we explode
self.explode()
elif block_name == "Enemy" and not self.parent: # Enemy bullet hitting enemy
crashed = True
elif block_name == "Jugador" and not self.parent: # Enemy bullet hitting the player
self.impact_side(block)
# If the player destroys, we destroy
if self.manager.destroy_element(block):
crashed = True
else: # Else, we explode
self.explode()
else:
pass
if crashed: # If we crashed, we destroy ourselves
self.destroy()
def destroy(self):
if self.parent == self.PLAYER:
self.manager.remove_player_bullet()
self.manager.remove_bullet(self)
return True
def explode(self):
if self.parent == self.PLAYER:
self.manager.remove_player_bullet()
# Create the explosion
Explosion(self.manager, self.rect)
self.manager.remove_bullet(self)
return True
def impact_side(self, block):
posX = self.speed[0]
posY = self.speed[1]
if posX > 0: # Left side
self.rect.right = block.rect.left
if posX < 0: # Right side
self.rect.left = block.rect.right
if posY > 0: # Upper side
self.rect.bottom = block.rect.top
if posY < 0: # Lower side
self.rect.top = block.rect.bottom | Vicyorus/BattleTank | src/Bullet.py | Python | gpl-3.0 | 5,190 |
# -*- coding: utf-8 -*-
#
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import RetrieveAPIView, ListAPIView
from django.shortcuts import get_object_or_404
from django.db.models import Q
from common.utils import get_logger, get_object_or_none
from common.mixins.api import SuggestionMixin
from users.models import User, UserGroup
from users.serializers import UserSerializer, UserGroupSerializer
from users.filters import UserFilter
from perms.models import AssetPermission
from perms.serializers import AssetPermissionSerializer
from perms.filters import AssetPermissionFilter
from orgs.mixins.api import OrgBulkModelViewSet
from orgs.mixins import generics
from assets.api import FilterAssetByNodeMixin
from ..models import Asset, Node, Platform
from .. import serializers
from ..tasks import (
update_assets_hardware_info_manual, test_assets_connectivity_manual,
test_system_users_connectivity_a_asset, push_system_users_a_asset
)
from ..filters import FilterAssetByNodeFilterBackend, LabelFilterBackend, IpInFilterBackend
logger = get_logger(__file__)
__all__ = [
'AssetViewSet', 'AssetPlatformRetrieveApi',
'AssetGatewayListApi', 'AssetPlatformViewSet',
'AssetTaskCreateApi', 'AssetsTaskCreateApi',
'AssetPermUserListApi', 'AssetPermUserPermissionsListApi',
'AssetPermUserGroupListApi', 'AssetPermUserGroupPermissionsListApi',
]
class AssetViewSet(SuggestionMixin, FilterAssetByNodeMixin, OrgBulkModelViewSet):
"""
API endpoint that allows Asset to be viewed or edited.
"""
model = Asset
filterset_fields = {
'hostname': ['exact'],
'ip': ['exact'],
'system_users__id': ['exact'],
'platform__base': ['exact'],
'is_active': ['exact'],
'protocols': ['exact', 'icontains']
}
search_fields = ("hostname", "ip")
ordering_fields = ("hostname", "ip", "port", "cpu_cores")
ordering = ('hostname', )
serializer_classes = {
'default': serializers.AssetSerializer,
'suggestion': serializers.MiniAssetSerializer
}
rbac_perms = {
'match': 'assets.match_asset'
}
extra_filter_backends = [FilterAssetByNodeFilterBackend, LabelFilterBackend, IpInFilterBackend]
def set_assets_node(self, assets):
if not isinstance(assets, list):
assets = [assets]
node_id = self.request.query_params.get('node_id')
if not node_id:
return
node = get_object_or_none(Node, pk=node_id)
if not node:
return
node.assets.add(*assets)
def perform_create(self, serializer):
assets = serializer.save()
self.set_assets_node(assets)
class AssetPlatformRetrieveApi(RetrieveAPIView):
queryset = Platform.objects.all()
serializer_class = serializers.PlatformSerializer
rbac_perms = {
'retrieve': 'assets.view_gateway'
}
def get_object(self):
asset_pk = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_pk)
return asset.platform
class AssetPlatformViewSet(ModelViewSet):
queryset = Platform.objects.all()
serializer_class = serializers.PlatformSerializer
filterset_fields = ['name', 'base']
search_fields = ['name']
def check_object_permissions(self, request, obj):
if request.method.lower() in ['delete', 'put', 'patch'] and obj.internal:
self.permission_denied(
request, message={"detail": "Internal platform"}
)
return super().check_object_permissions(request, obj)
class AssetsTaskMixin:
def perform_assets_task(self, serializer):
data = serializer.validated_data
action = data['action']
assets = data.get('assets', [])
if action == "refresh":
task = update_assets_hardware_info_manual.delay(assets)
else:
# action == 'test':
task = test_assets_connectivity_manual.delay(assets)
return task
def perform_create(self, serializer):
task = self.perform_assets_task(serializer)
self.set_task_to_serializer_data(serializer, task)
def set_task_to_serializer_data(self, serializer, task):
data = getattr(serializer, '_data', {})
data["task"] = task.id
setattr(serializer, '_data', data)
class AssetTaskCreateApi(AssetsTaskMixin, generics.CreateAPIView):
model = Asset
serializer_class = serializers.AssetTaskSerializer
def create(self, request, *args, **kwargs):
pk = self.kwargs.get('pk')
request.data['asset'] = pk
request.data['assets'] = [pk]
return super().create(request, *args, **kwargs)
def check_permissions(self, request):
action = request.data.get('action')
action_perm_require = {
'refresh': 'assets.refresh_assethardwareinfo',
'push_system_user': 'assets.push_assetsystemuser',
'test': 'assets.test_assetconnectivity',
'test_system_user': 'assets.test_assetconnectivity'
}
perm_required = action_perm_require.get(action)
has = self.request.user.has_perm(perm_required)
if not has:
self.permission_denied(request)
def perform_asset_task(self, serializer):
data = serializer.validated_data
action = data['action']
if action not in ['push_system_user', 'test_system_user']:
return
asset = data['asset']
system_users = data.get('system_users')
if not system_users:
system_users = asset.get_all_system_users()
if action == 'push_system_user':
task = push_system_users_a_asset.delay(system_users, asset=asset)
elif action == 'test_system_user':
task = test_system_users_connectivity_a_asset.delay(system_users, asset=asset)
else:
task = None
return task
def perform_create(self, serializer):
task = self.perform_asset_task(serializer)
if not task:
task = self.perform_assets_task(serializer)
self.set_task_to_serializer_data(serializer, task)
class AssetsTaskCreateApi(AssetsTaskMixin, generics.CreateAPIView):
model = Asset
serializer_class = serializers.AssetsTaskSerializer
class AssetGatewayListApi(generics.ListAPIView):
serializer_class = serializers.GatewayWithAuthSerializer
rbac_perms = {
'list': 'assets.view_gateway'
}
def get_queryset(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
if not asset.domain:
return []
queryset = asset.domain.gateways.filter(protocol='ssh')
return queryset
class BaseAssetPermUserOrUserGroupListApi(ListAPIView):
def get_object(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
return asset
def get_asset_related_perms(self):
asset = self.get_object()
nodes = asset.get_all_nodes(flat=True)
perms = AssetPermission.objects.filter(Q(assets=asset) | Q(nodes__in=nodes))
return perms
class AssetPermUserListApi(BaseAssetPermUserOrUserGroupListApi):
filterset_class = UserFilter
search_fields = ('username', 'email', 'name', 'id', 'source', 'role')
serializer_class = UserSerializer
def get_queryset(self):
perms = self.get_asset_related_perms()
users = User.objects.filter(
Q(assetpermissions__in=perms) | Q(groups__assetpermissions__in=perms)
).distinct()
return users
class AssetPermUserGroupListApi(BaseAssetPermUserOrUserGroupListApi):
serializer_class = UserGroupSerializer
def get_queryset(self):
perms = self.get_asset_related_perms()
user_groups = UserGroup.objects.filter(assetpermissions__in=perms).distinct()
return user_groups
class BaseAssetPermUserOrUserGroupPermissionsListApiMixin(generics.ListAPIView):
model = AssetPermission
serializer_class = AssetPermissionSerializer
filterset_class = AssetPermissionFilter
search_fields = ('name',)
rbac_perms = {
'list': 'perms.view_assetpermission'
}
def get_object(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
return asset
def filter_asset_related(self, queryset):
asset = self.get_object()
nodes = asset.get_all_nodes(flat=True)
perms = queryset.filter(Q(assets=asset) | Q(nodes__in=nodes))
return perms
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
queryset = self.filter_asset_related(queryset)
return queryset
class AssetPermUserPermissionsListApi(BaseAssetPermUserOrUserGroupPermissionsListApiMixin):
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
queryset = self.filter_user_related(queryset)
queryset = queryset.distinct()
return queryset
def filter_user_related(self, queryset):
user = self.get_perm_user()
user_groups = user.groups.all()
perms = queryset.filter(Q(users=user) | Q(user_groups__in=user_groups))
return perms
def get_perm_user(self):
user_id = self.kwargs.get('perm_user_id')
user = get_object_or_404(User, pk=user_id)
return user
class AssetPermUserGroupPermissionsListApi(BaseAssetPermUserOrUserGroupPermissionsListApiMixin):
def filter_queryset(self, queryset):
queryset = super().filter_queryset(queryset)
queryset = self.filter_user_group_related(queryset)
queryset = queryset.distinct()
return queryset
def filter_user_group_related(self, queryset):
user_group = self.get_perm_user_group()
perms = queryset.filter(user_groups=user_group)
return perms
def get_perm_user_group(self):
user_group_id = self.kwargs.get('perm_user_group_id')
user_group = get_object_or_404(UserGroup, pk=user_group_id)
return user_group
| jumpserver/jumpserver | apps/assets/api/asset.py | Python | gpl-3.0 | 10,108 |
#!/usr/bin/env ../jazzshell
"""
Perform song identification by loading up a corpus of harmonic analyses
and comparing parse results to all of them, according to some distance metric.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <[email protected]>"
import sys
from optparse import OptionParser
from jazzparser.data.parsing import ParseResults
from jazzparser.parsers.cky.parser import DirectedCkyParser
from jazzparser.utils.options import options_help_text, ModuleOption
from jazzparser.data.tonalspace import TonalSpaceAnalysisSet
from jazzparser.formalisms.music_halfspan import Formalism
from jazzparser.utils.tableprint import pprint_table
def main():
usage = "%prog [options] <song-set> <results-file0> [<results-file1> ...]"
parser = OptionParser(usage=usage)
parser.add_option("--popt", "--parser-options", dest="popts", action="append", help="specify options for the parser that interprets the gold standard annotations. Type '--popt help' to get a list of options (we use a DirectedCkyParser)")
parser.add_option("-m", "--metric", dest="metric", action="store", help="semantics distance metric to use. Use '-m help' for a list of available metrics")
parser.add_option("--mopt", "--metric-options", dest="mopts", action="append", help="options to pass to the semantics metric. Use with '--mopt help' with -m to see available options")
parser.add_option("-r", "--print-results", dest="print_results", action="store", default=5, type="int", help="number of top search results to print for each query (parse result). Default: 5. Use -1 to print distances from all songs in the corpus")
parser.add_option("-g", "--gold-only", dest="gold_only", action="store_true", help="skip results that have no gold standard sequence associated with them (we can't tell which is the right answer for these)")
parser.add_option("--mc", "--metric-computation", dest="metric_computation", action="store_true", help="output the computation information for the metric between the parse result and each top search result")
options, arguments = parser.parse_args()
# For now, we always use the music_halfspan formalism with this script
# If we wanted to make it generic, we'd just load the formalism according
# to a command-line option
formalism = Formalism
# Process parser options
if options.popts is not None:
poptstr = options.popts
if "help" in [s.strip().lower() for s in poptstr]:
# Output this parser's option help
print options_help_text(DirectedCkyParser.PARSER_OPTIONS, intro="Available options for gold standard interpreter")
sys.exit(0)
poptstr = ":".join(poptstr)
else:
poptstr = ""
popts = ModuleOption.process_option_string(poptstr)
# Check that the options are valid
try:
DirectedCkyParser.check_options(popts)
except ModuleOptionError, err:
logger.error("Problem with parser options (--popt): %s" % err)
sys.exit(1)
# Get a distance metric
# Just check this, as it'll cause problems
if len(formalism.semantics_distance_metrics) == 0:
print "ERROR: the formalism defines no distance metrics, so this "\
"script won't work"
sys.exit(1)
# First get the metric
if options.metric == "help":
# Print out a list of metrics available
print "Available distance metrics:"
print ", ".join([metric.name for metric in \
formalism.semantics_distance_metrics])
sys.exit(0)
if options.metric is None:
# Use the first in the list as default
metric_cls = formalism.semantics_distance_metrics[0]
else:
for m in formalism.semantics_distance_metrics:
if m.name == options.metric:
metric_cls = m
break
else:
# No metric found matching this name
print "No metric '%s'" % options.metric
sys.exit(1)
print >>sys.stderr, "Using distance metric: %s" % metric_cls.name
# Now process the metric options
if options.mopts is not None:
moptstr = options.mopts
if "help" in [s.strip().lower() for s in moptstr]:
# Output this parser's option help
print options_help_text(metric_cls.OPTIONS, intro="Available options for metric '%s'" % metric_cls.name)
sys.exit(0)
moptstr = ":".join(moptstr)
else:
moptstr = ""
mopts = ModuleOption.process_option_string(moptstr)
# Instantiate the metric with these options
metric = metric_cls(options=mopts)
if len(arguments) < 2:
print >>sys.stderr, "Specify a song corpus name and one or more files to read results from"
sys.exit(1)
# First argument is an TonalSpaceAnalysisSet
corpus_name = arguments[0]
# Load the corpus file
corpus = TonalSpaceAnalysisSet.load(corpus_name)
# The rest of the args are result files to analyze
res_files = arguments[1:]
# Work out how many results to print out
if options.print_results == -1:
print_up_to = None
else:
print_up_to = options.print_results
ranks = []
num_ranked = 0
for filename in res_files:
# Load the parse results
pres = ParseResults.from_file(filename)
if options.gold_only and pres.gold_sequence is None:
# Skip this sequence altogether if requested
continue
print "######################"
print "Read %s" % filename
# Try to get a correct answer from the PR file
if pres.gold_sequence is None:
print "No correct answer specified in input file"
correct_song = None
else:
# Process the name of the sequence in the same way that
# TonalSpaceAnalysisSet does
# Ideally, they should make a common function call, but let's be
# bad for once
correct_song = pres.gold_sequence.string_name.lower()
print "Correct answer: %s" % correct_song
# Could have an empty result list: skip if it does
if len(pres.semantics) == 0:
print "No results"
# Failed to get any result: if this is one of the sequences that
# is in the corpus, count it as a 0 result. Otherwise, skip:
# we wouldn't have counted it anyway
num_ranked += 1
ranks.append(None)
continue
result = pres.semantics[0][1]
# Compare to each of the songs
distances = []
for name,songsem in corpus:
# Get the distance from this song
dist = metric.distance(result, songsem)
distances.append((name,dist,songsem))
# Sort them to get the closest first
distances.sort(key=lambda x:x[1])
print
# Print out the top results, as many as requested
top_results = distances[:print_up_to]
table = [["","Song","Distance"]] + [
["*" if res[0] == correct_song else "",
"%s" % res[0],
"%.2f" % res[1]] for res in top_results]
pprint_table(sys.stdout, table, default_just=True)
print
if correct_song is not None:
# Look for the correct answer in the results
for rank,(name,distance,__) in enumerate(distances):
# Match up the song name to the correct one
if name == correct_song:
correct_rank = rank
break
else:
# The song name was not found in the corpus at all
correct_rank = None
if correct_rank is None:
print "Song was not found in corpus"
else:
print "Correct answer got rank %d" % correct_rank
# Record the ranks so we can compute the MRR
ranks.append(correct_rank+1)
num_ranked += 1
print
if options.metric_computation:
print "Explanation of top result:"
print metric.print_computation(result, distances[0][2])
print
if num_ranked:
print "\nGot ranks for %d sequences" % num_ranked
# Compute the mean reciprocal rank, the reciprocal of the harmonic mean
# of the ranks of the correct answers
mrr = sum([0.0 if rank is None else 1.0/rank for rank in ranks], 0.0) \
/ len(ranks)
print "Mean reciprocal rank: %f" % mrr
if mrr > 0.0:
hmr = 1.0/mrr
print "Harmonic mean rank: %f" % hmr
succ_ranks = [rank for rank in ranks if rank is not None]
print "\nIncluding only successful parses (%d):" % len(succ_ranks)
mrr_succ = sum([1.0/rank for rank in succ_ranks], 0.0) / len(succ_ranks)
print "Mean reciprocal rank: %f" % mrr_succ
if mrr_succ > 0.0:
hmr_succ = 1.0/mrr_succ
print "Harmonic mean rank: %f" % hmr_succ
else:
print "\nNo results to analyze"
if __name__ == "__main__":
main()
| markgw/jazzparser | bin/analysis/findsong.py | Python | gpl-3.0 | 10,264 |
#!/usr/bin/env python
# -*- coding, utf-8 -*-
# FIDATA. Open-source system for analysis of financial and economic data
# Copyright © 2013 Basil Peace
# This file is part of FIDATA.
#
# FIDATA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FIDATA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FIDATA. If not, see <http://www.gnu.org/licenses/>.
from FIDATA import *
initArgParser('Importer of predefined data', defLogFilename = 'import.log')
initFIDATA()
from csv import DictReader
from os import path
from PIL import Image
classes = []
logging.info('Import of predefined data started')
# logging.info('Importing langs')
# reader = DictReader(open('langs.csv', 'r', encoding = 'UTF8'), delimiter = ';')
# for row in reader:
# Lang(FIDATA, row = row, write = True, tryGetFromDB = False)
# del reader
# commit()
# classes += [Lang]
logging.info('Importing scripts')
reader = DictReader(open('scripts.csv', 'r', encoding = 'UTF8'), delimiter = ';')
for row in reader:
Script(FIDATA, row = row, write = True, tryGetFromDB = False)
del reader
commit()
classes += [Script]
logging.info('Importing countries')
reader = DictReader(open('countries.csv', 'r', encoding = 'UTF8'), delimiter = ';')
for row in reader:
# parent_country
# associated_with
if row['alpha2_code'] == '':
row['alpha2_code'] = None
else:
flagFilename = 'flags\{:s}.png'.format(row['alpha2_code'].lower())
if path.exists(flagFilename):
row['flag'] = Image.open(flagFilename)
if row['gov_website'] == '':
row['gov_website'] = None
if row['stats_website'] == '':
row['stats_website'] = None
FIDATA.country(row = row, write = True, tryGetFromDB = False)
del reader
commit()
classes += [Country]
# logging.info('Importing issuers')
# reader = DictReader(open('issuers.csv', 'r', encoding = 'UTF8'), delimiter = ';')
# for row in reader:
# FIDATA.issuer(row = row, write = True, tryGetFromDB = False)
# del reader
# commit()
# classes += [Issuer]
# logging.info('Importing currencies')
# reader = DictReader(open('currencies.csv', 'r', encoding = 'UTF8'), delimiter = ';')
# for row in reader:
# row['instr_type'] = InstrumentType.Currency
# FIDATA.instrument(row = row, write = True, tryGetFromDB = False)
# del reader
# commit()
# logging.info('Importing instruments')
# reader = DictReader(open('instruments.csv', 'r', encoding = 'UTF8'), delimiter = ';')
# for row in reader:
# FIDATA.instrument(row = row, write = True, tryGetFromDB = False)
# del reader
# commit()
# classes += [Instrument]
logging.info('Importing markets')
reader = DictReader(open('markets.csv', 'r', encoding = 'UTF8'), delimiter = ';')
child_markets = list()
for row in reader:
if row['country_alpha2_code'] == '':
row['country'] = None
else:
row['country'] = FIDATA.country(row = {
'alpha2_code': row['country_alpha2_code'],
'name' : row['country_name']
})
if row['acronym'] == '':
row['acronym'] = None
if row['website'] == '':
row['website'] = None
if row['trade_organizer_symbol'] == '':
FIDATA.market(row = row, write = True, tryGetFromDB = False)
else:
child_markets.append((FIDATA.market(row = row, write = False, tryGetFromDB = False), row['trade_organizer_symbol']))
del reader
for (market, trade_organizer_symbol) in child_markets:
market.tradeOrganizer = FIDATA.market(row = {'symbol': trade_organizer_symbol})
market.write()
del child_markets
commit()
classes += [Market]
logging.info('Importing data providers')
reader = DictReader(open('data_providers.csv', 'r', encoding = 'UTF8'), delimiter = ';')
for row in reader:
if row['trade_organizer_symbol'] == '':
row['trade_organizer'] = None
else:
row['trade_organizer'] = FIDATA.market(row = {'symbol': row['trade_organizer_symbol']})
FIDATA.dataProvider(row = row, write = True, tryGetFromDB = False)
del reader
commit()
classes += [DataProvider]
logging.info('Import of predefined data finished')
FIDATA.analyze(classes)
| FIDATA/database-draft | predefined-data/import.py | Python | gpl-3.0 | 4,368 |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2011 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp.xmlstream import JID
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.plugins.base import BasePlugin
from sleekxmpp.plugins.xep_0060 import stanza
log = logging.getLogger(__name__)
class XEP_0060(BasePlugin):
"""
XEP-0060 Publish Subscribe
"""
name = 'xep_0060'
description = 'XEP-0060: Publish-Subscribe'
dependencies = set(['xep_0030', 'xep_0004'])
stanza = stanza
def plugin_init(self):
self.node_event_map = {}
self.xmpp.register_handler(
Callback('Pubsub Event: Items',
StanzaPath('message/pubsub_event/items'),
self._handle_event_items))
self.xmpp.register_handler(
Callback('Pubsub Event: Purge',
StanzaPath('message/pubsub_event/purge'),
self._handle_event_purge))
self.xmpp.register_handler(
Callback('Pubsub Event: Delete',
StanzaPath('message/pubsub_event/delete'),
self._handle_event_delete))
self.xmpp.register_handler(
Callback('Pubsub Event: Configuration',
StanzaPath('message/pubsub_event/configuration'),
self._handle_event_configuration))
self.xmpp.register_handler(
Callback('Pubsub Event: Subscription',
StanzaPath('message/pubsub_event/subscription'),
self._handle_event_subscription))
def plugin_end(self):
self.xmpp.remove_handler('Pubsub Event: Items')
self.xmpp.remove_handler('Pubsub Event: Purge')
self.xmpp.remove_handler('Pubsub Event: Delete')
self.xmpp.remove_handler('Pubsub Event: Configuration')
self.xmpp.remove_handler('Pubsub Event: Subscription')
def _handle_event_items(self, msg):
"""Raise events for publish and retraction notifications."""
node = msg['pubsub_event']['items']['node']
multi = len(msg['pubsub_event']['items']) > 1
values = {}
if multi:
values = msg.values
del values['pubsub_event']
for item in msg['pubsub_event']['items']:
event_name = self.node_event_map.get(node, None)
event_type = 'publish'
if item.name == 'retract':
event_type = 'retract'
if multi:
condensed = self.xmpp.Message()
condensed.values = values
condensed['pubsub_event']['items']['node'] = node
condensed['pubsub_event']['items'].append(item)
self.xmpp.event('pubsub_%s' % event_type, msg)
if event_name:
self.xmpp.event('%s_%s' % (event_name, event_type),
condensed)
else:
self.xmpp.event('pubsub_%s' % event_type, msg)
if event_name:
self.xmpp.event('%s_%s' % (event_name, event_type), msg)
def _handle_event_purge(self, msg):
"""Raise events for node purge notifications."""
node = msg['pubsub_event']['purge']['node']
event_name = self.node_event_map.get(node, None)
self.xmpp.event('pubsub_purge', msg)
if event_name:
self.xmpp.event('%s_purge' % event_name, msg)
def _handle_event_delete(self, msg):
"""Raise events for node deletion notifications."""
node = msg['pubsub_event']['delete']['node']
event_name = self.node_event_map.get(node, None)
self.xmpp.event('pubsub_delete', msg)
if event_name:
self.xmpp.event('%s_delete' % event_name, msg)
def _handle_event_configuration(self, msg):
"""Raise events for node configuration notifications."""
node = msg['pubsub_event']['configuration']['node']
event_name = self.node_event_map.get(node, None)
self.xmpp.event('pubsub_config', msg)
if event_name:
self.xmpp.event('%s_config' % event_name, msg)
def _handle_event_subscription(self, msg):
"""Raise events for node subscription notifications."""
node = msg['pubsub_event']['subscription']['node']
event_name = self.node_event_map.get(node, None)
self.xmpp.event('pubsub_subscription', msg)
if event_name:
self.xmpp.event('%s_subscription' % event_name, msg)
def map_node_event(self, node, event_name):
"""
Map node names to events.
When a pubsub event is received for the given node,
raise the provided event.
For example::
map_node_event('http://jabber.org/protocol/tune',
'user_tune')
will produce the events 'user_tune_publish' and 'user_tune_retract'
when the respective notifications are received from the node
'http://jabber.org/protocol/tune', among other events.
Arguments:
node -- The node name to map to an event.
event_name -- The name of the event to raise when a
notification from the given node is received.
"""
self.node_event_map[node] = event_name
def create_node(self, jid, node, config=None, ntype=None, ifrom=None,
block=True, callback=None, timeout=None):
"""
Create and configure a new pubsub node.
A server MAY use a different name for the node than the one provided,
so be sure to check the result stanza for a server assigned name.
If no configuration form is provided, the node will be created using
the server's default configuration. To get the default configuration
use get_node_config().
Arguments:
jid -- The JID of the pubsub service.
node -- Optional name of the node to create. If no name is
provided, the server MAY generate a node ID for you.
The server can also assign a different name than the
one you provide; check the result stanza to see if
the server assigned a name.
config -- Optional XEP-0004 data form of configuration settings.
ntype -- The type of node to create. Servers typically default
to using 'leaf' if no type is provided.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub']['create']['node'] = node
if config is not None:
form_type = 'http://jabber.org/protocol/pubsub#node_config'
if 'FORM_TYPE' in config['fields']:
config.field['FORM_TYPE']['value'] = form_type
else:
config.add_field(var='FORM_TYPE',
ftype='hidden',
value=form_type)
if ntype:
if 'pubsub#node_type' in config['fields']:
config.field['pubsub#node_type']['value'] = ntype
else:
config.add_field(var='pubsub#node_type', value=ntype)
iq['pubsub']['configure'].append(config)
return iq.send(block=block, callback=callback, timeout=timeout)
def subscribe(self, jid, node, bare=True, subscribee=None, options=None,
ifrom=None, block=True, callback=None, timeout=None):
"""
Subscribe to updates from a pubsub node.
The rules for determining the JID that is subscribing to the node are:
1. If subscribee is given, use that as provided.
2. If ifrom was given, use the bare or full version based on bare.
3. Otherwise, use self.xmpp.boundjid based on bare.
Arguments:
jid -- The pubsub service JID.
node -- The node to subscribe to.
bare -- Indicates if the subscribee is a bare or full JID.
Defaults to True for a bare JID.
subscribee -- The JID that is subscribing to the node.
options --
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a
response before exiting the send call if blocking
is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub']['subscribe']['node'] = node
if subscribee is None:
if ifrom:
if bare:
subscribee = JID(ifrom).bare
else:
subscribee = ifrom
else:
if bare:
subscribee = self.xmpp.boundjid.bare
else:
subscribee = self.xmpp.boundjid
iq['pubsub']['subscribe']['jid'] = subscribee
if options is not None:
iq['pubsub']['options'].append(options)
return iq.send(block=block, callback=callback, timeout=timeout)
def unsubscribe(self, jid, node, subid=None, bare=True, subscribee=None,
ifrom=None, block=True, callback=None, timeout=None):
"""
Unubscribe from updates from a pubsub node.
The rules for determining the JID that is unsubscribing
from the node are:
1. If subscribee is given, use that as provided.
2. If ifrom was given, use the bare or full version based on bare.
3. Otherwise, use self.xmpp.boundjid based on bare.
Arguments:
jid -- The pubsub service JID.
node -- The node to subscribe to.
subid -- The specific subscription, if multiple subscriptions
exist for this JID/node combination.
bare -- Indicates if the subscribee is a bare or full JID.
Defaults to True for a bare JID.
subscribee -- The JID that is subscribing to the node.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a
response before exiting the send call if blocking
is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub']['unsubscribe']['node'] = node
if subscribee is None:
if ifrom:
if bare:
subscribee = JID(ifrom).bare
else:
subscribee = ifrom
else:
if bare:
subscribee = self.xmpp.boundjid.bare
else:
subscribee = self.xmpp.boundjid
iq['pubsub']['unsubscribe']['jid'] = subscribee
iq['pubsub']['unsubscribe']['subid'] = subid
return iq.send(block=block, callback=callback, timeout=timeout)
def get_subscriptions(self, jid, node=None, ifrom=None, block=True,
callback=None, timeout=None):
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
iq['pubsub']['subscriptions']['node'] = node
return iq.send(block=block, callback=callback, timeout=timeout)
def get_affiliations(self, jid, node=None, ifrom=None, block=True,
callback=None, timeout=None):
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
iq['pubsub']['affiliations']['node'] = node
return iq.send(block=block, callback=callback, timeout=timeout)
def get_subscription_options(self, jid, node=None, user_jid=None,
ifrom=None, block=True, callback=None,
timeout=None):
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
if user_jid is None:
iq['pubsub']['default']['node'] = node
else:
iq['pubsub']['options']['node'] = node
iq['pubsub']['options']['jid'] = user_jid
return iq.send(block=block, callback=callback, timeout=timeout)
def set_subscription_options(self, jid, node, user_jid, options,
ifrom=None, block=True, callback=None,
timeout=None):
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
iq['pubsub']['options']['node'] = node
iq['pubsub']['options']['jid'] = user_jid
iq['pubsub']['options'].append(options)
return iq.send(block=block, callback=callback, timeout=timeout)
def get_node_config(self, jid, node=None, ifrom=None, block=True,
callback=None, timeout=None):
"""
Retrieve the configuration for a node, or the pubsub service's
default configuration for new nodes.
Arguments:
jid -- The JID of the pubsub service.
node -- The node to retrieve the configuration for. If None,
the default configuration for new nodes will be
requested. Defaults to None.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
if node is None:
iq['pubsub_owner']['default']
else:
iq['pubsub_owner']['configure']['node'] = node
return iq.send(block=block, callback=callback, timeout=timeout)
def get_node_subscriptions(self, jid, node, ifrom=None, block=True,
callback=None, timeout=None):
"""
Retrieve the subscriptions associated with a given node.
Arguments:
jid -- The JID of the pubsub service.
node -- The node to retrieve subscriptions from.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
iq['pubsub_owner']['subscriptions']['node'] = node
return iq.send(block=block, callback=callback, timeout=timeout)
def get_node_affiliations(self, jid, node, ifrom=None, block=True,
callback=None, timeout=None):
"""
Retrieve the affiliations associated with a given node.
Arguments:
jid -- The JID of the pubsub service.
node -- The node to retrieve affiliations from.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
iq['pubsub_owner']['affiliations']['node'] = node
return iq.send(block=block, callback=callback, timeout=timeout)
def delete_node(self, jid, node, ifrom=None, block=True,
callback=None, timeout=None):
"""
Delete a a pubsub node.
Arguments:
jid -- The JID of the pubsub service.
node -- The node to delete.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub_owner']['delete']['node'] = node
return iq.send(block=block, callback=callback, timeout=timeout)
def set_node_config(self, jid, node, config, ifrom=None, block=True,
callback=None, timeout=None):
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub_owner']['configure']['node'] = node
iq['pubsub_owner']['configure']['form'].values = config.values
return iq.send(block=block, callback=callback, timeout=timeout)
def publish(self, jid, node, id=None, payload=None, options=None,
ifrom=None, block=True, callback=None, timeout=None):
"""
Add a new item to a node, or edit an existing item.
For services that support it, you can use the publish command
as an event signal by not including an ID or payload.
When including a payload and you do not provide an ID then
the service will generally create an ID for you.
Publish options may be specified, and how those options
are processed is left to the service, such as treating
the options as preconditions that the node's settings
must match.
Arguments:
jid -- The JID of the pubsub service.
node -- The node to publish the item to.
id -- Optionally specify the ID of the item.
payload -- The item content to publish.
options -- A form of publish options.
ifrom -- Specify the sender's JID.
block -- Specify if the send call will block until a response
is received, or a timeout occurs. Defaults to True.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to sleekxmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub']['publish']['node'] = node
if id is not None:
iq['pubsub']['publish']['item']['id'] = id
if payload is not None:
iq['pubsub']['publish']['item']['payload'] = payload
iq['pubsub']['publish_options'] = options
return iq.send(block=block, callback=callback, timeout=timeout)
def retract(self, jid, node, id, notify=None, ifrom=None, block=True,
callback=None, timeout=None):
"""
Delete a single item from a node.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub']['retract']['node'] = node
iq['pubsub']['retract']['notify'] = notify
iq['pubsub']['retract']['item']['id'] = id
return iq.send(block=block, callback=callback, timeout=timeout)
def purge(self, jid, node, ifrom=None, block=True, callback=None,
timeout=None):
"""
Remove all items from a node.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub_owner']['purge']['node'] = node
return iq.send(block=block, callback=callback, timeout=timeout)
def get_nodes(self, *args, **kwargs):
"""
Discover the nodes provided by a Pubsub service, using disco.
"""
return self.xmpp['xep_0030'].get_items(*args, **kwargs)
def get_item(self, jid, node, item_id, ifrom=None, block=True,
callback=None, timeout=None):
"""
Retrieve the content of an individual item.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
item = stanza.Item()
item['id'] = item_id
iq['pubsub']['items']['node'] = node
iq['pubsub']['items'].append(item)
return iq.send(block=block, callback=callback, timeout=timeout)
def get_items(self, jid, node, item_ids=None, max_items=None,
iterator=False, ifrom=None, block=False,
callback=None, timeout=None):
"""
Request the contents of a node's items.
The desired items can be specified, or a query for the last
few published items can be used.
Pubsub services may use result set management for nodes with
many items, so an iterator can be returned if needed.
"""
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='get')
iq['pubsub']['items']['node'] = node
iq['pubsub']['items']['max_items'] = max_items
if item_ids is not None:
for item_id in item_ids:
item = stanza.Item()
item['id'] = item_id
iq['pubsub']['items'].append(item)
if iterator:
return self.xmpp['xep_0059'].iterate(iq, 'pubsub')
else:
return iq.send(block=block, callback=callback, timeout=timeout)
def get_item_ids(self, jid, node, ifrom=None, block=True,
callback=None, timeout=None, iterator=False):
"""
Retrieve the ItemIDs hosted by a given node, using disco.
"""
return self.xmpp['xep_0030'].get_items(jid, node,
ifrom=ifrom,
block=block,
callback=callback,
timeout=timeout,
iterator=iterator)
def modify_affiliations(self, jid, node, affiliations=None, ifrom=None,
block=True, callback=None, timeout=None):
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub_owner']['affiliations']['node'] = node
if affiliations is None:
affiliations = []
for jid, affiliation in affiliations:
aff = stanza.OwnerAffiliation()
aff['jid'] = jid
aff['affiliation'] = affiliation
iq['pubsub_owner']['affiliations'].append(aff)
return iq.send(block=block, callback=callback, timeout=timeout)
def modify_subscriptions(self, jid, node, subscriptions=None, ifrom=None,
block=True, callback=None, timeout=None):
iq = self.xmpp.Iq(sto=jid, sfrom=ifrom, stype='set')
iq['pubsub_owner']['subscriptions']['node'] = node
if subscriptions is None:
subscriptions = []
for jid, subscription in subscriptions:
sub = stanza.OwnerSubscription()
sub['jid'] = jid
sub['subscription'] = subscription
iq['pubsub_owner']['subscriptions'].append(sub)
return iq.send(block=block, callback=callback, timeout=timeout)
| tiancj/emesene | emesene/e3/xmpp/SleekXMPP/sleekxmpp/plugins/xep_0060/pubsub.py | Python | gpl-3.0 | 25,426 |
# Copyright 2016 Sam Parkinson <[email protected]>
#
# This file is part of Something for Reddit.
#
# Something for Reddit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Something for Reddit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Something for Reddit. If not, see <http://www.gnu.org/licenses/>.
import re
import sys
from argparse import ArgumentParser
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import Soup
from redditisgtk.sublist import SubList
from redditisgtk.subentry import SubEntry
from redditisgtk.api import RedditAPI, APIFactory
from redditisgtk.webviews import (FullscreenableWebview, ProgressContainer,
WebviewToolbar)
from redditisgtk.readcontroller import get_read_controller
from redditisgtk.identity import IdentityController
from redditisgtk.identitybutton import IdentityButton
from redditisgtk.comments import CommentsView
from redditisgtk.settings import get_settings, show_settings
from redditisgtk import webviews
VIEW_WEB = 0
VIEW_COMMENTS = 1
class RedditWindow(Gtk.Window):
def __init__(
self,
ic: IdentityController,
api_factory: APIFactory,
start_sub: str = None):
Gtk.Window.__init__(self, title='Something For Reddit',
icon_name='today.sam.reddit-is-gtk')
self.add_events(Gdk.EventMask.KEY_PRESS_MASK)
self.set_default_size(600, 600)
self.set_wmclass("reddit-is-gtk", "Something For Reddit")
self._ic = ic
self._ic.token_changed.connect(self._token_changed_cb)
self._api = None
self._api_factory = api_factory
settings = Gtk.Settings.get_default()
screen = Gdk.Screen.get_default()
css_provider = Gtk.CssProvider.get_default()
if settings.props.gtk_application_prefer_dark_theme:
css_provider.load_from_resource(
'/today/sam/reddit-is-gtk/style.dark.css')
else:
css_provider.load_from_resource(
'/today/sam/reddit-is-gtk/style.css')
context = Gtk.StyleContext()
context.add_provider_for_screen(screen, css_provider,
Gtk.STYLE_PROVIDER_PRIORITY_USER)
self._paned = Gtk.Paned.new(Gtk.Orientation.HORIZONTAL)
self.add(self._paned)
self._paned.show()
self._webview = FullscreenableWebview()
self._webview_bin = ProgressContainer(self._webview)
self._comments = None
self._stack = Gtk.Stack()
self._stack.connect('notify::visible-child', self.__stack_child_cb)
self._paned.add2(self._stack)
#self._paned.child_set_property(self._stack, 'shrink', True)
self._stack.show()
self._sublist_bin = Gtk.Box()
self._paned.add1(self._sublist_bin)
self._sublist_bin.show()
self._sublist = None
self._make_header()
left = Gtk.SizeGroup(mode=Gtk.SizeGroupMode.HORIZONTAL)
left.add_widget(self._left_header)
left.add_widget(self._sublist_bin)
self._paned.connect('notify::position',
self.__notify_position_cb,
self._header_paned)
self._header_paned.connect('notify::position',
self.__notify_position_cb,
self._paned)
self._token_changed_cb(self._ic)
def _token_changed_cb(self, ic):
api = self._api_factory.get_for_token(self._ic.active_token)
if self._api != api:
self.connect_api(api)
def connect_api(self, api: RedditAPI):
start_sub = None
if start_sub is None:
start_sub = get_settings()['default-sub']
if self._api is not None:
# TODO: swap right panel
print('Swapping', self._api, 'for', api)
start_sub = self._sublist.get_uri()
# FIXME: do we need to disconnect the callbacks?
self._sublist.destroy()
self._subentry.destroy()
self._api = api
self._api.request_failed.connect(self.__request_failed_cb)
self._sublist = SubList(self._api, start_sub)
self._sublist.new_other_pane.connect(self.__new_other_pane_cb)
self._sublist_bin.add(self._sublist)
#self._paned.child_set_property(self._sublist, 'shrink', True)
self._sublist.show()
self._subentry = SubEntry(self._api, start_sub)
self._subentry.activate.connect(self.__subentry_activate_cb)
self._subentry.escape_me.connect(self.__subentry_escape_me_cb)
self._left_header.props.custom_title = self._subentry
self._subentry.show()
def __request_failed_cb(self, api, msg, info):
dialog = Gtk.Dialog(use_header_bar=True)
label = Gtk.Label(label=info)
dialog.get_content_area().add(label)
label.show()
dialog.add_button('Retry', Gtk.ResponseType.ACCEPT)
dialog.add_button(':shrug-shoulders:', Gtk.ResponseType.REJECT)
dialog.set_default_response(Gtk.ResponseType.ACCEPT)
dialog.props.transient_for = self
response = dialog.run()
if response == Gtk.ResponseType.ACCEPT:
self._api.resend_message(msg)
dialog.destroy()
def do_event(self, event):
if event.type != Gdk.EventType.KEY_PRESS:
return
if isinstance(self.get_focus(), (Gtk.TextView, Gtk.Entry)):
return
if event.keyval == Gdk.KEY_F6:
self._subentry.focus()
return True
if event.keyval == Gdk.KEY_1:
self._sublist.focus()
return True
if event.keyval == Gdk.KEY_2:
self._stack.set_visible_child(self._comments)
self._comments.focus()
return True
if event.keyval == Gdk.KEY_3:
self._stack.set_visible_child(self._webview_bin)
self._webview.grab_focus()
return True
if event.state & Gdk.ModifierType.MOD1_MASK:
if event.keyval == Gdk.KEY_Left:
self._webview.go_back()
return True
if event.keyval == Gdk.KEY_Right:
self._webview.go_forward()
return True
def __new_other_pane_cb(self, sublist, link, comments, link_first):
if self._comments is not None:
self._stack.remove(self._comments)
self._stack.remove(self._webview_bin)
self._comments = comments
if self._comments is not None:
self._stack.add_titled(self._comments, 'comments', 'Comments')
self._comments.show()
self._stack.add_titled(self._webview_bin, 'web', 'Web')
self._webview_bin.show()
self._webview.show()
self._paned.position = 400 # TODO: constant
if link_first and link:
self._stack.set_visible_child(self._webview_bin)
self._webview.load_uri(link)
else:
self._stack.set_visible_child(self._comments)
if link is not None:
self._webview.load_when_visible(link)
def load_uri_from_label(self, uri):
is_relative = not uri.startswith('http')
is_reddit = re.match('https?:\/\/(www\.|np\.)?reddit\.com\/', uri)
if is_relative or is_reddit:
self.goto_reddit_uri(uri)
return
self._stack.set_visible_child(self._webview_bin)
self._webview.load_uri(uri)
def __notify_position_cb(self, caller, pspec, other):
other.props.position = caller.props.position
def _make_header(self):
self._header_paned = Gtk.Paned()
self.set_titlebar(self._header_paned)
self._left_header = Gtk.HeaderBar()
layout = Gtk.Settings.get_default().props.gtk_decoration_layout
self._left_header.set_decoration_layout(layout.split(':')[0])
self._right_header = Gtk.HeaderBar()
self._right_header.set_decoration_layout(':'+layout.split(':')[1])
self._right_header.props.show_close_button = True
self._header_paned.add1(self._left_header)
self._header_paned.child_set_property(
self._left_header, 'shrink', False)
self._header_paned.add2(self._right_header)
self._header_paned.child_set_property(
self._right_header, 'shrink', False)
self._header_paned.show_all()
self._identity = IdentityButton(self._ic)
self._right_header.pack_start(self._identity)
self._identity.show()
self._stack_switcher = Gtk.StackSwitcher(stack=self._stack)
self._right_header.pack_end(self._stack_switcher)
self._stack_switcher.show()
self._webview_toolbar = WebviewToolbar(self._webview)
self._right_header.pack_end(self._webview_toolbar)
def __stack_child_cb(self, stack, pspec):
self._webview_toolbar.props.visible = \
stack.props.visible_child == self._webview_bin
def get_sublist(self):
return self._sublist
def get_comments_view(self):
return self._comments
def goto_sublist(self, to):
'''
Public api for children:
widget.get_toplevel().goto_sublist('/u/samdroid_/overview')
'''
self._sublist.goto(to)
self._subentry.goto(to)
def goto_reddit_uri(self, uri):
'''
Go to a reddit.com uri, eg. "https://www.reddit.com/r/rct"
'''
for cond in ['https://', 'http://', 'www.', 'np.', 'reddit.com']:
if uri.startswith(cond):
uri = uri[len(cond):]
# Disregard the '' before the leading /
parts = uri.split('/')[1:]
if len(parts) <= 3:
# /u/*/*, /r/*, /r/*/*(sorting)
self.goto_sublist(uri)
elif parts[2] == 'comments':
self.goto_sublist('/r/{}/'.format(parts[1]))
cv = CommentsView(self._api, permalink=uri)
cv.got_post_data.connect(self.__cv_got_post_data_cb)
self.__new_other_pane_cb(None, None, cv, False)
def __cv_got_post_data_cb(self, cv, post):
if not post.get('is_self') and 'url' in post:
self.__new_other_pane_cb(None, post['url'], cv, True)
def __subentry_activate_cb(self, entry, sub):
self._sublist.goto(sub)
self._sublist.focus()
def __subentry_escape_me_cb(self, entry):
self._sublist.focus()
class Application(Gtk.Application):
def __init__(self, ic: IdentityController, api_factory: APIFactory):
Gtk.Application.__init__(self,
application_id='today.sam.reddit-is-gtk')
self.connect('startup', self.__do_startup_cb)
GLib.set_application_name("Something For Reddit")
GLib.set_prgname("reddit-is-gtk")
self._w = None
self._queue_uri = None
self._ic = ic
self._api_factory = api_factory
def do_activate(self):
self._w = RedditWindow(self._ic, self._api_factory)
self.add_window(self._w)
self._w.show()
if self._queue_uri is not None:
self._w.goto_reddit_uri(self._queue_uri)
self._queue_uri = None
def goto_reddit_uri(self, uri):
if self._w is None:
self._queue_uri = uri
else:
self._w.goto_reddit_uri(uri)
# TODO: Using do_startup causes SIGSEGV for me
def __do_startup_cb(self, app):
actions = [('about', self.__about_cb),
('quit', self.__quit_cb),
('issues', self.__issues_cb),
('shortcuts', self.__shortcuts_cb),
('settings', self.__settings_cb)]
for name, cb in actions:
a = Gio.SimpleAction.new(name, None)
a.connect('activate', cb)
self.add_action(a)
builder = Gtk.Builder.new_from_resource(
'/today/sam/reddit-is-gtk/app-menu.ui')
self._menu = builder.get_object('app-menu')
self.props.app_menu = self._menu
def __about_cb(self, action, param):
about_dialog = Gtk.AboutDialog(
program_name='Something for Reddit',
comments=('A simple but powerful Reddit client, built for GNOME '
'powered by Gtk+ 3.0'),
license_type=Gtk.License.GPL_3_0,
logo_icon_name='today.sam.reddit-is-gtk',
authors=['Sam P. <[email protected]>'],
website='https://github.com/samdroid-apps/something-for-reddit',
website_label='Git Repo and Issue Tracker on GitHub',
# VERSION:
version='0.2.2 - “The Bugfix Release ⓇⒺⒹⓊⓍ”',
transient_for=self._w,
modal=True)
about_dialog.present()
def __issues_cb(self, action, param):
webviews.open_uri_external(
'https://github.com/samdroid-apps/something-for-reddit/issues')
def __quit_cb(self, action, param):
self.quit()
def __shortcuts_cb(self, action, param):
builder = Gtk.Builder.new_from_resource(
'/today/sam/reddit-is-gtk/shortcuts-window.ui')
builder.get_object('window').show()
def __settings_cb(self, action, param):
show_settings()
def run():
parser = ArgumentParser(
description='Something For Reddit - a Gtk+ Reddit Client')
parser.add_argument('uri', help='Reddit.com URI to open, or None',
default=None, nargs='?')
parser.add_argument('--dark', help='Force Gtk+ dark theme',
action='store_true')
args = parser.parse_args()
settings = Gtk.Settings.get_default()
theme = get_settings()['theme']
if theme == 'dark':
settings.props.gtk_application_prefer_dark_theme = True
elif theme == 'light':
settings.props.gtk_application_prefer_dark_theme = False
if args.dark:
settings.props.gtk_application_prefer_dark_theme = True
session = Soup.Session()
ic = IdentityController(session)
api_factory = APIFactory(session)
a = Application(ic, api_factory)
if args.uri is not None:
a.goto_reddit_uri(args.uri)
status = a.run()
get_read_controller().save()
sys.exit(status)
| samdroid-apps/something-for-reddit | redditisgtk/main.py | Python | gpl-3.0 | 14,799 |
#!/usr/bin/env python
"""XBeeModem.py bypasses the XBee's 802.15.4 capabilities and simply uses it modem for communications
You don't have to master 802.15.4 and a large set of XBee commands
to make a very simple but potentially useful network. At its core,
the XBee radio is a modem and you can use it directly for simple serial communications.
Reference Materials:
Non-blocking read from stdin in python - http://repolinux.wordpress.com/2012/10/09/non-blocking-read-from-stdin-in-python/
Non-blocking read on a subprocess.PIPE in python - http://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
Originally Created By:
Jeff Irland ([email protected]) in March 2013
"""
# imported modules
# import os # portable way of using operating system dependent functionality
import sys # provides access to some variables used or maintained by the interpreter
import time # provides various time-related functions
# import fcntl # performs file control and I/O control on file descriptors
# import serial # encapsulates the access for the serial port
# import urllib
from serial import Serial
# from pretty import switchColor, printc # provides colored text for xterm & VT100 type terminals using ANSI escape sequences
from webiopi.clients import PiHttpClient, Macro
from webiopi.utils.logger import exception, setDebug, info, debug, logToFile
from webiopi.utils.thread import stop
VERSION = '1.0'
def displayHelp():
print("Xbee command-line usage")
print("xbee [-h] [-c config] [-l log] [-d] [port]")
print("")
print("Options:")
print(" -h, --help Display this help")
print(" -c, --config file Load config from file")
print(" -l, --log file Log to file")
print(" -d, --debug Enable DEBUG")
print("")
print("Arguments:")
print(" port WebIOPi port")
exit()
def main(argv):
port = 8000
configfile = None
logfile = None
i = 1
while i < len(argv):
if argv[i] in ["-c", "-C", "--config-file"]:
configfile = argv[i+1]
i+=1
elif argv[i] in ["-l", "-L", "--log-file"]:
logfile = argv[i+1]
i+=1
elif argv[i] in ["-h", "-H", "--help"]:
displayHelp()
elif argv[i] in ["-d", "--debug"]:
setDebug()
else:
try:
port = int(argv[i])
except ValueError:
displayHelp()
i+=1
if logfile:
logToFile(logfile)
info("Starting XBee %s" % VERSION)
# setup serial
serial = Serial()
serial.port = '/dev/ttyAMA0'
serial.baudrate = 9600
serial.timeout = 1
serial.writeTimeout = 1
serial.open()
# disregard any pending data in xbee buffer
serial.flushInput()
# force to show xbee boot menu
time.sleep(.5)
serial.writelines("\r")
time.sleep(.5)
# read menu
while serial.inWaiting() > 0:
debug("%s" % serial.readline())
# trigger bypass automatically
serial.writelines("B")
# post startup message to other XBee's and at stdout
#serial.writelines("RPi #1 is up and running.\r\n")
info("RPi #1 is up and running.")
try:
while True:
waitToSend = True
# read a line from XBee and convert it from b'xxx\r\n' to xxx and send to webiopi
while serial.inWaiting() > 0:
try:
line = serial.readline().decode('utf-8').strip('\n\r')
if line:
waitToSend = False
debug("Received: %s" % line)
try:
client = PiHttpClient("127.0.0.1")
macro = Macro(client, "setCarInfo")
macro.call(line.replace(",", "%2C"))
except:
exception("setting car info failed!")
except KeyboardInterrupt:
raise
except Exception as e:
exception(e)
time.sleep(1.)
try:
time.sleep(1.)
client = PiHttpClient("127.0.0.1")
macro = Macro(client, "getPitInfo")
data = macro.call()
if data:
debug("Sending: %s" % data)
serial.writelines(data + "\n")
except KeyboardInterrupt:
raise
except Exception as e:
exception(e)
time.sleep(1.)
except KeyboardInterrupt:
info("*** Ctrl-C keyboard interrupt ***")
if __name__ == "__main__":
try:
main(sys.argv)
except Exception as e:
exception(e)
stop()
info("RPi #1 is going down")
| HelloClarice/ClariceNet | Pit/RaspberryPi/daemons/xbee.py | Python | gpl-3.0 | 4,184 |
from random import random, randint
from PIL import Image, ImageDraw, ImageFont
import perlin
def draw_background(setup) :
canvas = setup['canvas']
image = Image.new('RGBA', canvas, tuple(setup['color']['back']))
background = Image.new('RGBA', canvas, (0,0,0,0))
draw = ImageDraw.Draw(background)
stars = [[ int(p * random()) for p in canvas ] for x in range(400) ]
scale = lambda x, r : x + r * (min(canvas) / 320)
color = (255, 255, 255, 100)
for x, y in stars :
r = random()
draw.ellipse([x, y, scale(x, r), scale(y, r)], fill=color)
return Image.alpha_composite(image, background)
def apply_noise(image, setup) :
generator = perlin.Perlin()
octaves = 5
persistence = 5
coef = 30
width, height = setup['canvas'][0], setup['canvas'][1]
list_of_pixels = list(image.getdata())
for i, pixel in enumerate(list_of_pixels) :
if pixel != (0, 0, 0, 0) :
noise = generator.OctavePerlin((i % width) / coef, i / (height * coef), 0, 1, 5)
new_pixel = [ int(x * (1 + noise)) for x in pixel[:3] ]
new_pixel.append(pixel[3])
list_of_pixels[i] = tuple(new_pixel)
image = Image.new(image.mode, image.size)
image.putdata(list_of_pixels)
return image
def apply_ray_effect(sun_image, setup) :
canvas = setup['canvas']
width, height = setup['canvas'][0], setup['canvas'][1]
decay = 0.8
density = 1.2
samples = 128
center = [ x / 2 for x in setup['canvas'] ]
list_of_pixels = list(sun_image.getdata())
new_image = []
print("starting postprocessing...")
for y in range(height) :
print("\rjob completed {0:.2f}%".format(round(100 * (y / height), 2)), flush=True, end="")
for x in range(width) :
tc = [x, y]
delta = [ (x - center[0]) / (samples * density), (y - center[1]) / (samples * density) ]
color = list_of_pixels[x + y * width]
illumination = 1
for m in range(samples) :
tc = [ tc[0] - delta[0], tc[1] - delta[1]]
add_color = tuple( illumination * x for x in list_of_pixels[int(tc[0]) + int(tc[1]) * width] )
illumination *= decay
color = tuple( x + y for x, y in zip(color, add_color))
new_image.append(tuple(int(x) for x in color))
image = Image.new(sun_image.mode, sun_image.size)
image.putdata(new_image)
return image
def draw_sun(image, setup) :
canvas = setup['canvas']
sun_image = Image.new('RGBA', canvas, (0,0,0,0))
draw = ImageDraw.Draw(sun_image)
draw.ellipse(setup['sun'], fill=tuple(setup['color']['base']))
sun_image = apply_noise(sun_image, setup)
sun_image = apply_ray_effect(sun_image, setup)
return Image.alpha_composite(image, sun_image)
def create_sun(setup) :
canvas, size = setup['canvas'], setup['size']
d = min([x * 0.08 * 5 * size for x in canvas])
planet = [ (x - d) / 2 for x in canvas ]
planet.append(planet[0] + d)
planet.append(planet[1] + d)
setup['sun'] = planet
setup['diam'] = d
setup['rad'] = d / 2
setup['center'] = [ planet[0] + d / 2, planet[1] + d / 2 ]
def sun_setup(setup) :
tmp_setup = {}
tmp_setup['color'] = {}
tmp_setup['color']['base'] = setup[2]
tmp_setup['color']['back'] = [ int(x * 0.05) for x in setup[2] ]
tmp_setup['canvas'] = [ x * 2 for x in setup[0] ]
tmp_setup['size'] = setup[1] / (255 * 2)
return tmp_setup
def sun(setup) :
setup = sun_setup(setup)
create_sun(setup)
image = draw_background(setup)
image = draw_sun(image, setup)
canvas = [ int(x / 2) for x in setup['canvas'] ]
resized = image.resize(canvas, Image.ANTIALIAS)
resized.save("test.png")
setup = ((1200, 750), 128, (180, 120, 100))
sun(setup)
| vojtatom/planets | sun.py | Python | gpl-3.0 | 3,499 |
#!/usr/bin/python
import pygame
import math
import random
import sys
import PixelPerfect
from pygame.locals import *
from water import Water
from menu import Menu
from game import Game
from highscores import Highscores
from options import Options
import util
from locals import *
import health
import cloud
import mine
import steamboat
import pirateboat
import shark
import seagull
def init():
health.init()
steamboat.init()
shark.init()
pirateboat.init()
cloud.init()
mine.init()
seagull.init()
def main():
global SCREEN_FULLSCREEN
pygame.init()
util.load_config()
if len(sys.argv) > 1:
for arg in sys.argv:
if arg == "-np":
Variables.particles = False
elif arg == "-na":
Variables.alpha = False
elif arg == "-nm":
Variables.music = False
elif arg == "-ns":
Variables.sound = False
elif arg == "-f":
SCREEN_FULLSCREEN = True
scr_options = 0
if SCREEN_FULLSCREEN: scr_options += FULLSCREEN
screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),scr_options ,32)
pygame.display.set_icon(util.load_image("kuvake"))
pygame.display.set_caption("Trip on the Funny Boat")
init()
joy = None
if pygame.joystick.get_count() > 0:
joy = pygame.joystick.Joystick(0)
joy.init()
try:
util.load_music("JDruid-Trip_on_the_Funny_Boat")
if Variables.music:
pygame.mixer.music.play(-1)
except:
# It's not a critical problem if there's no music
pass
pygame.time.set_timer(NEXTFRAME, 1000 / FPS) # 30 fps
Water.global_water = Water()
main_selection = 0
while True:
main_selection = Menu(screen, ("New Game", "High Scores", "Options", "Quit"), main_selection).run()
if main_selection == 0:
# New Game
selection = Menu(screen, ("Story Mode", "Endless Mode")).run()
if selection == 0:
# Story
score = Game(screen).run()
Highscores(screen, score).run()
elif selection == 1:
# Endless
score = Game(screen, True).run()
Highscores(screen, score, True).run()
elif main_selection == 1:
# High Scores
selection = 0
while True:
selection = Menu(screen, ("Story Mode", "Endless Mode", "Endless Online"), selection).run()
if selection == 0:
# Story
Highscores(screen).run()
elif selection == 1:
# Endless
Highscores(screen, endless = True).run()
elif selection == 2:
# Online
Highscores(screen, endless = True, online = True).run()
else:
break
elif main_selection == 2:
# Options
selection = Options(screen).run()
else: #if main_selection == 3:
# Quit
return
if __name__ == '__main__':
main()
| italomaia/turtle-linux | games/FunnyBoat/run_game.py | Python | gpl-3.0 | 3,190 |
#!/usr/bin/env python2
#
# Copyright 2016 Philipp Winter <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Turn pcap into csv file.
Extract timestamp, source IP address, and query name of all DNS queries in the
given pcap, and turn it into a CSV.
"""
import sys
import scapy.all as scapy
# We exclude the following two measurement hosts.
MEASUREMENT_HOSTS = frozenset(["92.243.1.186", "198.83.85.34"])
def process_file(pcap_file):
packets = scapy.rdpcap(pcap_file)
for packet in packets:
if not packet.haslayer(scapy.IP):
continue
if not packet.haslayer(scapy.DNSQR):
continue
query = packet[scapy.DNSQR].qname
src_addr = packet[scapy.IP].src
# Skip DNS response.
if src_addr in MEASUREMENT_HOSTS:
continue
print "%s,%s,%s" % (packet.time, packet[scapy.IP].src, query.lower())
return 0
if __name__ == "__main__":
if len(sys.argv) != 2:
print >> sys.stderr, "\nUsage: %s PCAP_FILE\n" % sys.argv[0]
sys.exit(1)
pcap_file = sys.argv[1]
sys.exit(process_file(pcap_file))
| NullHypothesis/tor-dns-tools | dns-pcap-to-csv.py | Python | gpl-3.0 | 1,724 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=too-few-public-methods
"""State describing the conversion to momentum transfer"""
from __future__ import (absolute_import, division, print_function)
import json
import copy
from sans.state.state_base import (StateBase, rename_descriptor_names, BoolParameter, PositiveFloatParameter,
ClassTypeParameter, StringParameter)
from sans.common.enums import (ReductionDimensionality, RangeStepType, SANSFacility)
from sans.state.state_functions import (is_pure_none_or_not_none, is_not_none_and_first_larger_than_second,
validation_message)
from sans.state.automatic_setters import (automatic_setters)
# ----------------------------------------------------------------------------------------------------------------------
# State
# ----------------------------------------------------------------------------------------------------------------------
@rename_descriptor_names
class StateConvertToQ(StateBase):
reduction_dimensionality = ClassTypeParameter(ReductionDimensionality)
use_gravity = BoolParameter()
gravity_extra_length = PositiveFloatParameter()
radius_cutoff = PositiveFloatParameter()
wavelength_cutoff = PositiveFloatParameter()
# 1D settings
q_min = PositiveFloatParameter()
q_max = PositiveFloatParameter()
q_1d_rebin_string = StringParameter()
# 2D settings
q_xy_max = PositiveFloatParameter()
q_xy_step = PositiveFloatParameter()
q_xy_step_type = ClassTypeParameter(RangeStepType)
# -----------------------
# Q Resolution specific
# ---------------------
use_q_resolution = BoolParameter()
q_resolution_collimation_length = PositiveFloatParameter()
q_resolution_delta_r = PositiveFloatParameter()
moderator_file = StringParameter()
# Circular aperture settings
q_resolution_a1 = PositiveFloatParameter()
q_resolution_a2 = PositiveFloatParameter()
# Rectangular aperture settings
q_resolution_h1 = PositiveFloatParameter()
q_resolution_h2 = PositiveFloatParameter()
q_resolution_w1 = PositiveFloatParameter()
q_resolution_w2 = PositiveFloatParameter()
def __init__(self):
super(StateConvertToQ, self).__init__()
self.reduction_dimensionality = ReductionDimensionality.OneDim
self.use_gravity = False
self.gravity_extra_length = 0.0
self.use_q_resolution = False
self.radius_cutoff = 0.0
self.wavelength_cutoff = 0.0
def validate(self):
is_invalid = {}
# 1D Q settings
if not is_pure_none_or_not_none([self.q_min, self.q_max]):
entry = validation_message("The q boundaries for the 1D reduction are inconsistent.",
"Make sure that both q boundaries are set (or none).",
{"q_min": self.q_min,
"q_max": self.q_max})
is_invalid.update(entry)
if is_not_none_and_first_larger_than_second([self.q_min, self.q_max]):
entry = validation_message("Incorrect q bounds for 1D reduction.",
"Make sure that the lower q bound is smaller than the upper q bound.",
{"q_min": self.q_min,
"q_max": self.q_max})
is_invalid.update(entry)
if self.reduction_dimensionality is ReductionDimensionality.OneDim:
if self.q_min is None or self.q_max is None:
entry = validation_message("Q bounds not set for 1D reduction.",
"Make sure to set the q boundaries when using a 1D reduction.",
{"q_min": self.q_min,
"q_max": self.q_max})
is_invalid.update(entry)
if self.q_1d_rebin_string is not None:
if self.q_1d_rebin_string == "":
entry = validation_message("Q rebin string does not seem to be valid.",
"Make sure to provide a valid rebin string",
{"q_1d_rebin_string": self.q_1d_rebin_string})
is_invalid.update(entry)
elif not is_valid_rebin_string(self.q_1d_rebin_string):
entry = validation_message("Q rebin string does not seem to be valid.",
"Make sure to provide a valid rebin string",
{"q_1d_rebin_string": self.q_1d_rebin_string})
is_invalid.update(entry)
# QXY settings
if self.reduction_dimensionality is ReductionDimensionality.TwoDim:
if self.q_xy_max is None or self.q_xy_step is None:
entry = validation_message("Q bounds not set for 2D reduction.",
"Make sure that the q_max value bound and the step for the 2D reduction.",
{"q_xy_max": self.q_xy_max,
"q_xy_step": self.q_xy_step})
is_invalid.update(entry)
# Q Resolution settings
if self.use_q_resolution:
if not is_pure_none_or_not_none([self.q_resolution_a1, self.q_resolution_a2]):
entry = validation_message("Inconsistent circular geometry.",
"Make sure that both diameters for the circular apertures are set.",
{"q_resolution_a1": self.q_resolution_a1,
"q_resolution_a2": self.q_resolution_a2})
is_invalid.update(entry)
if not is_pure_none_or_not_none([self.q_resolution_h1, self.q_resolution_h2, self.q_resolution_w1,
self.q_resolution_w2]):
entry = validation_message("Inconsistent rectangular geometry.",
"Make sure that both diameters for the circular apertures are set.",
{"q_resolution_h1": self.q_resolution_h1,
"q_resolution_h2": self.q_resolution_h2,
"q_resolution_w1": self.q_resolution_w1,
"q_resolution_w2": self.q_resolution_w2})
is_invalid.update(entry)
if all(element is None for element in [self.q_resolution_a1, self.q_resolution_a2, self.q_resolution_w1,
self.q_resolution_w2, self.q_resolution_h1, self.q_resolution_h2]):
entry = validation_message("Aperture is undefined.",
"Make sure that you set the geometry for a circular or a "
"rectangular aperture.",
{"q_resolution_a1": self.q_resolution_a1,
"q_resolution_a2": self.q_resolution_a2,
"q_resolution_h1": self.q_resolution_h1,
"q_resolution_h2": self.q_resolution_h2,
"q_resolution_w1": self.q_resolution_w1,
"q_resolution_w2": self.q_resolution_w2})
is_invalid.update(entry)
if self.moderator_file is None:
entry = validation_message("Missing moderator file.",
"Make sure to specify a moderator file when using q resolution.",
{"moderator_file": self.moderator_file})
is_invalid.update(entry)
is_invalid.update({"moderator_file": "A moderator file is required for the q resolution calculation."})
if is_invalid:
raise ValueError("StateMoveDetectorISIS: The provided inputs are illegal. "
"Please see: {0}".format(json.dumps(is_invalid)))
# ----------------------------------------------------------------------------------------------------------------------
# Builder
# ----------------------------------------------------------------------------------------------------------------------
class StateConvertToQBuilder(object):
@automatic_setters(StateConvertToQ)
def __init__(self):
super(StateConvertToQBuilder, self).__init__()
self.state = StateConvertToQ()
def build(self):
self.state.validate()
return copy.copy(self.state)
# ------------------------------------------
# Factory method for StateConvertToQBuilder
# ------------------------------------------
def get_convert_to_q_builder(data_info):
# The data state has most of the information that we require to define the q conversion.
# For the factory method, only the facility/instrument is of relevance.
facility = data_info.facility
if facility is SANSFacility.ISIS:
return StateConvertToQBuilder()
else:
raise NotImplementedError("StateConvertToQBuilder: Could not find any valid save builder for the "
"specified StateData object {0}".format(str(data_info)))
# -------------------------------------------
# Free functions
# -------------------------------------------
def is_valid_rebin_string(rebin_string):
is_valid = True
try:
values = [float(el) for el in rebin_string.split(",")]
if len(values) < 2:
is_valid = False
elif len(values) == 2:
if values[0] > values[1]:
is_valid = False
elif len(values) % 2 == 1: # odd number of entries
step_points = values[::2]
if not is_increasing(step_points):
is_valid = False
else:
is_valid = False
except: # noqa
is_valid = False
return is_valid
def is_increasing(step_points):
return all(el1 <= el2 for el1, el2 in zip(step_points, step_points[1:]))
| mganeva/mantid | scripts/SANS/sans/state/convert_to_q.py | Python | gpl-3.0 | 10,526 |
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Viewer for archives packaged by archive.py
"""
from __future__ import print_function
import argparse
import os
import pprint
import sys
import tempfile
import zlib
from PyInstaller.loader import pyimod02_archive
from PyInstaller.archive.readers import CArchiveReader, NotAnArchiveError
from PyInstaller.compat import stdin_input
import PyInstaller.log
stack = []
cleanup = []
def main(name, brief, debug, rec_debug, **unused_options):
global stack
if not os.path.isfile(name):
print(name, "is an invalid file name!", file=sys.stderr)
return 1
arch = get_archive(name)
stack.append((name, arch))
if debug or brief:
show_log(arch, rec_debug, brief)
raise SystemExit(0)
else:
show(name, arch)
while 1:
try:
toks = stdin_input('? ').split(None, 1)
except EOFError:
# Ctrl-D
print(file=sys.stderr) # Clear line.
break
if not toks:
usage()
continue
if len(toks) == 1:
cmd = toks[0]
arg = ''
else:
cmd, arg = toks
cmd = cmd.upper()
if cmd == 'U':
if len(stack) > 1:
arch = stack[-1][1]
arch.lib.close()
del stack[-1]
name, arch = stack[-1]
show(name, arch)
elif cmd == 'O':
if not arg:
arg = stdin_input('open name? ')
arg = arg.strip()
try:
arch = get_archive(arg)
except NotAnArchiveError as e:
print(e, file=sys.stderr)
continue
if arch is None:
print(arg, "not found", file=sys.stderr)
continue
stack.append((arg, arch))
show(arg, arch)
elif cmd == 'X':
if not arg:
arg = stdin_input('extract name? ')
arg = arg.strip()
data = get_data(arg, arch)
if data is None:
print("Not found", file=sys.stderr)
continue
filename = stdin_input('to filename? ')
if not filename:
print(repr(data))
else:
with open(filename, 'wb') as fp:
fp.write(data)
elif cmd == 'Q':
break
else:
usage()
do_cleanup()
def do_cleanup():
global stack, cleanup
for (name, arch) in stack:
arch.lib.close()
stack = []
for filename in cleanup:
try:
os.remove(filename)
except Exception as e:
print("couldn't delete", filename, e.args, file=sys.stderr)
cleanup = []
def usage():
print("U: go Up one level", file=sys.stderr)
print("O <name>: open embedded archive name", file=sys.stderr)
print("X <name>: extract name", file=sys.stderr)
print("Q: quit", file=sys.stderr)
def get_archive(name):
if not stack:
if name[-4:].lower() == '.pyz':
return ZlibArchive(name)
return CArchiveReader(name)
parent = stack[-1][1]
try:
return parent.openEmbedded(name)
except KeyError:
return None
except (ValueError, RuntimeError):
ndx = parent.toc.find(name)
dpos, dlen, ulen, flag, typcd, name = parent.toc[ndx]
x, data = parent.extract(ndx)
tempfilename = tempfile.mktemp()
cleanup.append(tempfilename)
with open(tempfilename, 'wb') as fp:
fp.write(data)
if typcd == 'z':
return ZlibArchive(tempfilename)
else:
return CArchiveReader(tempfilename)
def get_data(name, arch):
if isinstance(arch.toc, dict):
(ispkg, pos, length) = arch.toc.get(name, (0, None, 0))
if pos is None:
return None
with arch.lib:
arch.lib.seek(arch.start + pos)
return zlib.decompress(arch.lib.read(length))
ndx = arch.toc.find(name)
dpos, dlen, ulen, flag, typcd, name = arch.toc[ndx]
x, data = arch.extract(ndx)
return data
def show(name, arch):
if isinstance(arch.toc, dict):
print(" Name: (ispkg, pos, len)")
toc = arch.toc
else:
print(" pos, length, uncompressed, iscompressed, type, name")
toc = arch.toc.data
pprint.pprint(toc)
def get_content(arch, recursive, brief, output):
if isinstance(arch.toc, dict):
toc = arch.toc
if brief:
for name, _ in toc.items():
output.append(name)
else:
output.append(toc)
else:
toc = arch.toc.data
for el in toc:
if brief:
output.append(el[5])
else:
output.append(el)
if recursive:
if el[4] in ('z', 'a'):
get_content(get_archive(el[5]), recursive, brief, output)
stack.pop()
def show_log(arch, recursive, brief):
output = []
get_content(arch, recursive, brief, output)
# first print all TOCs
for out in output:
if isinstance(out, dict):
pprint.pprint(out)
# then print the other entries
pprint.pprint([out for out in output if not isinstance(out, dict)])
def get_archive_content(filename):
"""
Get a list of the (recursive) content of archive `filename`.
This function is primary meant to be used by runtests.
"""
archive = get_archive(filename)
stack.append((filename, archive))
output = []
get_content(archive, recursive=True, brief=True, output=output)
do_cleanup()
return output
class ZlibArchive(pyimod02_archive.ZlibArchiveReader):
def checkmagic(self):
""" Overridable.
Check to see if the file object self.lib actually has a file
we understand.
"""
self.lib.seek(self.start) # default - magic is at start of file.
if self.lib.read(len(self.MAGIC)) != self.MAGIC:
raise RuntimeError("%s is not a valid %s archive file"
% (self.path, self.__class__.__name__))
if self.lib.read(len(self.pymagic)) != self.pymagic:
print("Warning: pyz is from a different Python version",
file=sys.stderr)
self.lib.read(4)
def run():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--log',
default=False,
action='store_true',
dest='debug',
help='Print an archive log (default: %(default)s)')
parser.add_argument('-r', '--recursive',
default=False,
action='store_true',
dest='rec_debug',
help='Recursively print an archive log (default: %(default)s). '
'Can be combined with -r')
parser.add_argument('-b', '--brief',
default=False,
action='store_true',
dest='brief',
help='Print only file name. (default: %(default)s). '
'Can be combined with -r')
PyInstaller.log.__add_options(parser)
parser.add_argument('name', metavar='pyi_archive',
help="pyinstaller archive to show content of")
args = parser.parse_args()
PyInstaller.log.__process_options(parser, args)
try:
raise SystemExit(main(**vars(args)))
except KeyboardInterrupt:
raise SystemExit("Aborted by user request.")
if __name__ == '__main__':
run()
| etherkit/OpenBeacon2 | client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/utils/cliutils/archive_viewer.py | Python | gpl-3.0 | 8,195 |
from .execute import GraphNode
from . import preprocess
def compile(layout_dict):
preprocess.proprocess(layout_dict)
# get nodes without any outputs
root_nodes = layout_dict["nodes"].keys() - {l[0] for l in layout_dict["links"]}
graph_dict = {}
out = [GraphNode.from_layout(root_node, layout_dict, graph_dict) for root_node in root_nodes]
return out
| Sverchok/SverchokRedux | core/compiler.py | Python | gpl-3.0 | 376 |
from .google import GoogleSpeaker
from .watson import WatsonSpeaker
"""
alfred
~~~~~~~~~~~~~~~~
Google tts.
"""
__all__ = [
'GoogleSpeaker',
'WatsonSpeaker'
]
| lowdev/alfred | speaker/tts/__init__.py | Python | gpl-3.0 | 169 |
class Solution(object):
def findPaths(self, m, n, N, i, j):
"""
:type m: int
:type n: int
:type N: int
:type i: int
:type j: int
:rtype: int
"""
MOD = 1000000007
paths = 0
cur = {(i, j): 1}
for i in xrange(N):
next = collections.defaultdict(int)
for (x, y), cnt in cur.iteritems():
for dx, dy in [[-1, 0], [0, 1], [1, 0], [0, -1]]:
nx = x + dx
ny = y + dy
if nx < 0 or ny < 0 or nx >= m or ny >= n:
paths += cnt
paths %= MOD
else:
next[(nx, ny)] += cnt
next[(nx, ny)] %= MOD
cur = next
return paths
# 94 / 94 test cases passed.
# Status: Accepted
# Runtime: 232 ms
# beats 75.36 %
| zqfan/leetcode | algorithms/576. Out of Boundary Paths/solution.py | Python | gpl-3.0 | 918 |
import os
import unittest
from urlparse import urlparse
from paegan.utils.asarandom import AsaRandom
class AsaRandomTest(unittest.TestCase):
def test_create_random_filename(self):
temp_filename = AsaRandom.filename(prefix="superduper", suffix=".nc")
path = urlparse(temp_filename).path
name, ext = os.path.splitext(path)
assert name.index("superduper") == 0
assert ext == ".nc" | asascience-open/paegan | tests/test_asarandom.py | Python | gpl-3.0 | 424 |
"""
Contains exception classes specific to this project.
"""
| electronic-library/electronic-library-core | library/exceptions.py | Python | gpl-3.0 | 62 |
class Zone:
def __init__(self, id_zone, name, region, description):
self.id = id_zone
self.name = name
self.region = region
self.description = description
| Crystal-SDS/dashboard | crystal_dashboard/dashboards/crystal/zones/models.py | Python | gpl-3.0 | 192 |
# -*- coding: iso-8859-1 -*-
# --------------------------------------------------------------------------- #
# SPEEDMETER Control wxPython IMPLEMENTATION
# Python Code By:
#
# Andrea Gavana, @ 25 Sep 2005
# Latest Revision: 10 Oct 2005, 22.40 CET
#
#
# TODO List/Caveats
#
# 1. Combination Of The Two Styles:
#
# SM_DRAW_PARTIAL_FILLER
# SM_DRAW_SECTORS
#
# Does Not Work Very Well. It Works Well Only In Case When The Sector Colours
# Are The Same For All Intervals.
#
#
# Thanks To Gerard Grazzini That Has Tried The Demo On MacOS, I Corrected A
# Bug On Line 246
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# [email protected]
# [email protected]
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# MODIFIED to add native Python wx.gizmos.LEDNubmerCtrl-type display, and a number of other things.
# by Jason Antman <http://www.jasonantman.com> <[email protected]>
# Modifications Copyright 2010 Jason Antman.
#
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""Description:
SpeedMeter Tries To Reproduce The Behavior Of Some Car Controls (But Not Only),
By Creating An "Angular" Control (Actually, Circular). I Remember To Have Seen
It Somewhere, And I Decided To Implement It In wxPython.
SpeedMeter Starts Its Construction From An Empty Bitmap, And It Uses Some
Functions Of The wx.DC Class To Create The Rounded Effects. Everything Is
Processed In The Draw() Method Of SpeedMeter Class.
This Implementation Allows You To Use Either Directly The wx.PaintDC, Or The
Better (For Me) Double Buffered Style With wx.BufferedPaintDC. The Double
Buffered Implementation Has Been Adapted From The wxPython Wiki Example:
http://wiki.wxpython.org/index.cgi/DoubleBufferedDrawing
Usage:
SpeedWindow1 = SM.SpeedMeter(parent,
bufferedstyle,
extrastyle,
mousestyle
)
None Of The Options (A Part Of Parent Class) Are Strictly Required, If You
Use The Defaults You Get A Very Simple SpeedMeter. For The Full Listing Of
The Input Parameters, See The SpeedMeter __init__() Method.
Methods And Settings:
SpeedMeter Is Highly Customizable, And In Particular You Can Set:
- The Start And End Angle Of Existence For SpeedMeter;
- The Intervals In Which You Divide The SpeedMeter (Numerical Values);
- The Corresponding Thicks For The Intervals;
- The Interval Colours (Different Intervals May Have Different Filling Colours);
- The Ticks Font And Colour;
- The Background Colour (Outsize The SpeedMeter Region);
- The External Arc Colour;
- The Hand (Arrow) Colour;
- The Hand's Shadow Colour;
- The Hand's Style ("Arrow" Or "Hand");
- The Partial Filler Colour;
- The Number Of Secondary (Intermediate) Ticks;
- The Direction Of Increasing Speed ("Advance" Or "Reverse");
- The Text To Be Drawn In The Middle And Its Font;
- The Icon To Be Drawn In The Middle;
- The First And Second Gradient Colours (That Fills The SpeedMeter Control);
- The Current Value.
For More Info On Methods And Initial Styles, Please Refer To The __init__()
Method For SpeedMeter Or To The Specific Functions.
SpeedMeter Control Is Freeware And Distributed Under The wxPython License.
Latest Revision: Andrea Gavana @ 10 Oct 2005, 22.40 CET
"""
#----------------------------------------------------------------------
# Beginning Of SPEEDMETER wxPython Code
#----------------------------------------------------------------------
import wx
import wx.lib.colourdb
import wx.lib.fancytext as fancytext
import wx.gizmos as gizmos # for LEDControl
import exceptions
from math import pi, sin, cos, log, sqrt, atan2
#----------------------------------------------------------------------
# DC Drawing Options
#----------------------------------------------------------------------
# SM_NORMAL_DC Uses The Normal wx.PaintDC
# SM_BUFFERED_DC Uses The Double Buffered Drawing Style
SM_NORMAL_DC = 0
SM_BUFFERED_DC = 1
#----------------------------------------------------------------------
# SpeedMeter Styles
#----------------------------------------------------------------------
# SM_ROTATE_TEXT: Draws The Ticks Rotated: The Ticks Are Rotated
# Accordingly To The Tick Marks Positions
# SM_DRAW_SECTORS: Different Intervals Are Painted In Differend Colours
# (Every Sector Of The Circle Has Its Own Colour)
# SM_DRAW_PARTIAL_SECTORS: Every Interval Has Its Own Colour, But Only
# A Circle Corona Is Painted Near The Ticks
# SM_DRAW_HAND: The Hand (Arrow Indicator) Is Drawn
# SM_DRAW_SHADOW: A Shadow For The Hand Is Drawn
# SM_DRAW_PARTIAL_FILLER: A Circle Corona That Follows The Hand Position
# Is Drawn Near The Ticks
# SM_DRAW_SECONDARY_TICKS: Intermediate (Smaller) Ticks Are Drawn Between
# Principal Ticks
# SM_DRAW_MIDDLE_TEXT: Some Text Is Printed In The Middle Of The Control
# Near The Center
# SM_DRAW_MIDDLE_ICON: An Icon Is Drawn In The Middle Of The Control Near
# The Center
# SM_DRAW_GRADIENT: A Gradient Of Colours Will Fill The Control
# SM_DRAW_FANCY_TICKS: With This Style You Can Use XML Tags To Create
# Some Custom Text And Draw It At The Ticks Position.
# See wx.lib.fancytext For The Tags.
# SM_DRAW_BOTTOM_TEXT: Some Text Is Printed In The Bottom Of The Control
# SM_DRAW_BOTTOM_LED: A gizmos.LEDNumberCtrl-style value display is drawn at the bottom
SM_ROTATE_TEXT = 1
SM_DRAW_SECTORS = 2
SM_DRAW_PARTIAL_SECTORS = 4
SM_DRAW_HAND = 8
SM_DRAW_SHADOW = 16
SM_DRAW_PARTIAL_FILLER = 32
SM_DRAW_SECONDARY_TICKS = 64
SM_DRAW_MIDDLE_TEXT = 128
SM_DRAW_MIDDLE_ICON = 256
SM_DRAW_GRADIENT = 512
SM_DRAW_FANCY_TICKS = 1024
SM_DRAW_BOTTOM_TEXT = 2048
SM_DRAW_BOTTOM_LED = 4096
#----------------------------------------------------------------------
# Event Binding
#----------------------------------------------------------------------
# SM_MOUSE_TRACK: The Mouse Left Click/Drag Allow You To Change The
# SpeedMeter Value Interactively
SM_MOUSE_TRACK = 1
LINE1 = 1
LINE2 = 2
LINE3 = 4
LINE4 = 8
LINE5 = 16
LINE6 = 32
LINE7 = 64
DECIMALSIGN = 128
DIGIT0 = LINE1 | LINE2 | LINE3 | LINE4 | LINE5 | LINE6
DIGIT1 = LINE2 | LINE3
DIGIT2 = LINE1 | LINE2 | LINE4 | LINE5 | LINE7
DIGIT3 = LINE1 | LINE2 | LINE3 | LINE4 | LINE7
DIGIT4 = LINE2 | LINE3 | LINE6 | LINE7
DIGIT5 = LINE1 | LINE3 | LINE4 | LINE6 | LINE7
DIGIT6 = LINE1 | LINE3 | LINE4 | LINE5 | LINE6 | LINE7
DIGIT7 = LINE1 | LINE2 | LINE3
DIGIT8 = LINE1 | LINE2 | LINE3 | LINE4 | LINE5 | LINE6 | LINE7
DIGIT9 = LINE1 | LINE2 | LINE3 | LINE6 | LINE7
DASH = LINE7
DIGITALL = -1
fontfamily = range(70, 78)
familyname = ["default", "decorative", "roman", "script", "swiss", "modern", "teletype"]
weights = range(90, 93)
weightsname = ["normal", "light", "bold"]
styles = [90, 93, 94]
stylesname = ["normal", "italic", "slant"]
#----------------------------------------------------------------------
# BUFFERENDWINDOW Class
# This Class Has Been Taken From The wxPython Wiki, And Slightly
# Adapted To Fill My Needs. See:
#
# http://wiki.wxpython.org/index.cgi/DoubleBufferedDrawing
#
# For More Info About DC And Double Buffered Drawing.
#----------------------------------------------------------------------
class BufferedWindow(wx.Window):
"""
A Buffered window class.
To use it, subclass it and define a Draw(DC) method that takes a DC
to draw to. In that method, put the code needed to draw the picture
you want. The window will automatically be double buffered, and the
screen will be automatically updated when a Paint event is received.
When the drawing needs to change, you app needs to call the
UpdateDrawing() method. Since the drawing is stored in a bitmap, you
can also save the drawing to file by calling the
SaveToFile(self,file_name,file_type) method.
"""
def __init__(self, parent, id,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style=wx.NO_FULL_REPAINT_ON_RESIZE,
bufferedstyle=SM_BUFFERED_DC):
wx.Window.__init__(self, parent, id, pos, size, style)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None)
# OnSize called to make sure the buffer is initialized.
# This might result in OnSize getting called twice on some
# platforms at initialization, but little harm done.
self.OnSize(None)
def Draw(self, dc):
"""
just here as a place holder.
This method should be over-ridden when sub-classed
"""
pass
def OnPaint(self, event):
"""
All that is needed here is to draw the buffer to screen
"""
if self._bufferedstyle == SM_BUFFERED_DC:
dc = wx.BufferedPaintDC(self, self._Buffer)
else:
dc = wx.PaintDC(self)
dc.DrawBitmap(self._Buffer,0,0)
def OnSize(self,event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
self.Width, self.Height = self.GetClientSizeTuple()
# Make new off screen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
# This seems required on MacOS, it doesn't like wx.EmptyBitmap with
# size = (0, 0)
# Thanks to Gerard Grazzini
if "__WXMAC__" in wx.Platform:
if self.Width == 0:
self.Width = 1
if self.Height == 0:
self.Height = 1
self._Buffer = wx.EmptyBitmap(self.Width, self.Height)
self.UpdateDrawing()
def UpdateDrawing(self):
"""
This would get called if the drawing needed to change, for whatever reason.
The idea here is that the drawing is based on some data generated
elsewhere in the system. IF that data changes, the drawing needs to
be updated.
"""
if self._bufferedstyle == SM_BUFFERED_DC:
dc = wx.BufferedDC(wx.ClientDC(self), self._Buffer)
self.Draw(dc)
else:
# update the buffer
dc = wx.MemoryDC()
dc.SelectObject(self._Buffer)
self.Draw(dc)
# update the screen
wx.ClientDC(self).Blit(0, 0, self.Width, self.Height, dc, 0, 0)
#----------------------------------------------------------------------
# SPEEDMETER Class
# This Is The Main Class Implementation. See __init__() Method For
# Details.
#----------------------------------------------------------------------
class SpeedMeter(BufferedWindow):
"""
Class for a gauge-style display using an arc marked with tick marks and interval numbers, and a moving needle/hand/pointer.
MODIFIED to add native Python wx.gizmos.LEDNubmerCtrl-type display, and a number of other things by Jason Antman <http://www.jasonantman.com> <[email protected]>
@todo: Need to document everything (all methods).
@todo: Build example code.
@todo: Find everything used internally only and prefix methods with "__"
@todo: Find all "raise" statements, and any "print" statements that print an error, make them work with exceptions - IndexError, TypeError, RuntimeError, LookupError
@todo: change all mentions of "hand" to "needle"
@todo: make sure we have setters/getters for DrawFaded, Alignment, Value (for LED)
@todo: in client, test gradients
"""
bottomTextBottom = None
DEBUG = False # controls debugging print statements
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize, extrastyle=SM_DRAW_HAND,
bufferedstyle=SM_BUFFERED_DC,
mousestyle=0):
""" Default Class Constructor.
Non Standard wxPython Parameters Are:
a) extrastyle: This Value Specifies The SpeedMeter Styles:
- SM_ROTATE_TEXT: Draws The Ticks Rotated: The Ticks Are Rotated
Accordingly To The Tick Marks Positions;
- SM_DRAW_SECTORS: Different Intervals Are Painted In Differend Colours
(Every Sector Of The Circle Has Its Own Colour);
- SM_DRAW_PARTIAL_SECTORS: Every Interval Has Its Own Colour, But Only
A Circle Corona Is Painted Near The Ticks;
- SM_DRAW_HAND: The Hand (Arrow Indicator) Is Drawn;
- SM_DRAW_SHADOW: A Shadow For The Hand Is Drawn;
- SM_DRAW_PARTIAL_FILLER: A Circle Corona That Follows The Hand Position
Is Drawn Near The Ticks;
- SM_DRAW_SECONDARY_TICKS: Intermediate (Smaller) Ticks Are Drawn Between
Principal Ticks;
- SM_DRAW_MIDDLE_TEXT: Some Text Is Printed In The Middle Of The Control
Near The Center;
- SM_DRAW_MIDDLE_ICON: An Icon Is Drawn In The Middle Of The Control Near
The Center;
- SM_DRAW_GRADIENT: A Gradient Of Colours Will Fill The Control;
- SM_DRAW_FANCY_TICKS: With This Style You Can Use XML Tags To Create
Some Custom Text And Draw It At The Ticks Position.
See wx.lib.fancytext For The Tags.;
- SM_DRAW_BOTTOM_TEXT: Some Text Is Printed In The Bottom Of The Control
- SM_DRAW_BOTTOM_LED: A wx.gizmos.LEDNumberCtrl-style value display is printed at the bottom
b) bufferedstyle: This Value Allows You To Use The Normal wx.PaintDC Or The
Double Buffered Drawing Options:
- SM_NORMAL_DC Uses The Normal wx.PaintDC;
- SM_BUFFERED_DC Uses The Double Buffered Drawing Style.
c) mousestyle: This Value Allows You To Use The Mouse To Change The SpeedMeter
Value Interactively With Left Click/Drag Events:
- SM_MOUSE_TRACK: The Mouse Left Click/Drag Allow You To Change The
SpeedMeter Value Interactively.
"""
self._extrastyle = extrastyle
self._bufferedstyle = bufferedstyle
self._mousestyle = mousestyle
if self._extrastyle & SM_DRAW_SECTORS and self._extrastyle & SM_DRAW_GRADIENT:
errstr = "\nERROR: Incompatible Options: SM_DRAW_SECTORS Can Not Be Used In "
errstr = errstr + "Conjunction With SM_DRAW_GRADIENT."
raise errstr
if self._extrastyle & SM_DRAW_PARTIAL_SECTORS and self._extrastyle & SM_DRAW_SECTORS:
errstr = "\nERROR: Incompatible Options: SM_DRAW_SECTORS Can Not Be Used In "
errstr = errstr + "Conjunction With SM_DRAW_PARTIAL_SECTORS."
raise errstr
if self._extrastyle & SM_DRAW_PARTIAL_SECTORS and self._extrastyle & SM_DRAW_PARTIAL_FILLER:
errstr = "\nERROR: Incompatible Options: SM_DRAW_PARTIAL_SECTORS Can Not Be Used In "
errstr = errstr + "Conjunction With SM_DRAW_PARTIAL_FILLER."
raise errstr
if self._extrastyle & SM_DRAW_FANCY_TICKS and self._extrastyle & SM_ROTATE_TEXT:
errstr = "\nERROR: Incompatible Options: SM_DRAW_FANCY_TICKS Can Not Be Used In "
errstr = errstr + "Conjunction With SM_ROTATE_TEXT."
raise errstr
if self._extrastyle & SM_DRAW_SHADOW and self._extrastyle & SM_DRAW_HAND == 0:
errstr = "\nERROR: Incompatible Options: SM_DRAW_SHADOW Can Be Used Only In "
errstr = errstr + "Conjunction With SM_DRAW_HAND."
if self._extrastyle & SM_DRAW_FANCY_TICKS:
wx.lib.colourdb.updateColourDB()
self.SetValueMultiplier() # for LED control
self.SetAngleRange()
self.SetIntervals()
self.SetSpeedValue()
self.SetIntervalColours()
self.SetArcColour()
self.SetTicks()
self.SetTicksFont()
self.SetTicksColour()
self.SetSpeedBackground()
self.SetHandColour()
self.SetShadowColour()
self.SetFillerColour()
self.SetDirection()
self.SetNumberOfSecondaryTicks()
self.SetMiddleText()
self.SetMiddleTextFont()
self.SetMiddleTextColour()
self.SetBottomText()
self.SetBottomTextFont()
self.SetBottomTextColour()
self.SetFirstGradientColour()
self.SetSecondGradientColour()
self.SetHandStyle()
self.DrawExternalArc()
self.DrawExternalCircle()
# for LED control
self._LEDwidth = 0
self._LEDheight = 0
self._LEDx = 0
self._LEDy = 0
self._InitLEDInternals()
self.SetLEDAlignment()
self.SetDrawFaded()
BufferedWindow.__init__(self, parent, id, pos, size,
style=wx.NO_FULL_REPAINT_ON_RESIZE,
bufferedstyle=bufferedstyle)
if self._mousestyle & SM_MOUSE_TRACK:
self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouseMotion)
def Draw(self, dc):
"""
Draws Everything On The Empty Bitmap.
Here All The Chosen Styles Are Applied.
GIGANTIC HUMONGOUS UGLY function that draws I{everything} on the bitmap except for the LEDs.
@param dc: the dc
@type dc: L{wx.BufferedDC}
"""
size = self.GetClientSize()
if size.x < 21 or size.y < 21:
return
new_dim = size.Get()
if not hasattr(self, "dim"):
self.dim = new_dim
self.scale = min([float(new_dim[0]) / self.dim[0],
float(new_dim[1]) / self.dim[1]])
# Create An Empty Bitmap
self.faceBitmap = wx.EmptyBitmap(size.width, size.height)
dc.BeginDrawing()
speedbackground = self.GetSpeedBackground()
# Set Background Of The Control
dc.SetBackground(wx.Brush(speedbackground))
dc.Clear()
centerX = self.faceBitmap.GetWidth()/2
centerY = self.faceBitmap.GetHeight()/2
self.CenterX = centerX
self.CenterY = centerY
# Get The Radius Of The Sector. Set It A Bit Smaller To Correct Draw After
radius = min(centerX, centerY) - 2
self.Radius = radius
# Get The Angle Of Existance Of The Sector
anglerange = self.GetAngleRange()
startangle = anglerange[1]
endangle = anglerange[0]
self.StartAngle = startangle
self.EndAngle = endangle
# Initialize The Colours And The Intervals - Just For Reference To The
# Children Functions
colours = None
intervals = None
if self._extrastyle & SM_DRAW_SECTORS or self._extrastyle & SM_DRAW_PARTIAL_SECTORS:
# Get The Intervals Colours
colours = self.GetIntervalColours()[:]
textangles = []
colourangles = []
xcoords = []
ycoords = []
# Get The Intervals (Partial Sectors)
intervals = self.GetIntervals()[:]
start = min(intervals)
end = max(intervals)
span = end - start
self.StartValue = start
self.EndValue = end
self.Span = span
# Get The Current Value For The SpeedMeter
currentvalue = self.GetSpeedValue()
# Get The Direction Of The SpeedMeter
direction = self.GetDirection()
if direction == "Reverse":
intervals.reverse()
if self._extrastyle & SM_DRAW_SECTORS or self._extrastyle & SM_DRAW_PARTIAL_SECTORS:
colours.reverse()
currentvalue = end - currentvalue
# This Because DrawArc Does Not Draw Last Point
offset = 0.1*self.scale/180.0
xstart, ystart = self.__CircleCoords(radius+1, -endangle, centerX, centerY)
xend, yend = self.__CircleCoords(radius+1, -startangle-offset, centerX, centerY)
# Calculate The Angle For The Current Value Of SpeedMeter
accelangle = (currentvalue - start)/float(span)*(startangle-endangle) - startangle
dc.SetPen(wx.TRANSPARENT_PEN)
if self._extrastyle & SM_DRAW_PARTIAL_FILLER:
# Get Some Data For The Partial Filler
fillercolour = self.GetFillerColour()
fillerendradius = radius - 10.0*self.scale
fillerstartradius = radius
if direction == "Advance":
fillerstart = accelangle
fillerend = -startangle
else:
fillerstart = -endangle
fillerend = accelangle
xs1, ys1 = self.__CircleCoords(fillerendradius, fillerstart, centerX, centerY)
xe1, ye1 = self.__CircleCoords(fillerendradius, fillerend, centerX, centerY)
xs2, ys2 = self.__CircleCoords(fillerstartradius, fillerstart, centerX, centerY)
xe2, ye2 = self.__CircleCoords(fillerstartradius, fillerend, centerX, centerY)
# Get The Sector In Which The Current Value Is
intersection = self.__GetIntersection(currentvalue, intervals)
sectorradius = radius - 10*self.scale
else:
sectorradius = radius
if self._extrastyle & SM_DRAW_PARTIAL_FILLER:
# Draw The Filler (Both In "Advance" And "Reverse" Directions)
dc.SetBrush(wx.Brush(fillercolour))
dc.DrawArc(xs2, ys2, xe2, ye2, centerX, centerY)
if self._extrastyle & SM_DRAW_SECTORS == 0:
dc.SetBrush(wx.Brush(speedbackground))
xclean1, yclean1 = self.__CircleCoords(sectorradius, -endangle, centerX, centerY)
xclean2, yclean2 = self.__CircleCoords(sectorradius, -startangle-offset, centerX, centerY)
dc.DrawArc(xclean1, yclean1, xclean2, yclean2, centerX, centerY)
# This Is Needed To Fill The Partial Sector Correctly
xold, yold = self.__CircleCoords(radius, startangle+endangle, centerX, centerY)
# Draw The Sectors
for ii, interval in enumerate(intervals):
if direction == "Advance":
current = interval - start
else:
current = end - interval
angle = (current/float(span))*(startangle-endangle) - startangle
angletext = -((pi/2.0) + angle)*180/pi
textangles.append(angletext)
colourangles.append(angle)
xtick, ytick = self.__CircleCoords(radius, angle, centerX, centerY)
# Keep The Coordinates, We Will Need Them After To Position The Ticks
xcoords.append(xtick)
ycoords.append(ytick)
x = xtick
y = ytick
if self._extrastyle & SM_DRAW_SECTORS:
if self._extrastyle & SM_DRAW_PARTIAL_FILLER:
if direction == "Advance":
if current > currentvalue:
x, y = self.__CircleCoords(radius, angle, centerX, centerY)
else:
x, y = self.__CircleCoords(sectorradius, angle, centerX, centerY)
else:
if current < end - currentvalue:
x, y = self.__CircleCoords(radius, angle, centerX, centerY)
else:
x, y = self.__CircleCoords(sectorradius, angle, centerX, centerY)
else:
x, y = self.__CircleCoords(radius, angle, centerX, centerY)
if ii > 0:
if self._extrastyle & SM_DRAW_PARTIAL_FILLER and ii == intersection:
# We Got The Interval In Which There Is The Current Value. If We Choose
# A "Reverse" Direction, First We Draw The Partial Sector, Next The Filler
dc.SetBrush(wx.Brush(speedbackground))
if direction == "Reverse":
if self._extrastyle & SM_DRAW_SECTORS:
dc.SetBrush(wx.Brush(colours[ii-1]))
dc.DrawArc(xe2, ye2, xold, yold, centerX, centerY)
if self._extrastyle & SM_DRAW_SECTORS:
dc.SetBrush(wx.Brush(colours[ii-1]))
else:
dc.SetBrush(wx.Brush(speedbackground))
dc.DrawArc(xs1, ys1, xe1, ye1, centerX, centerY)
if self._extrastyle & SM_DRAW_SECTORS:
dc.SetBrush(wx.Brush(colours[ii-1]))
# Here We Draw The Rest Of The Sector In Which The Current Value Is
if direction == "Advance":
dc.DrawArc(xs1, ys1, x, y, centerX, centerY)
x = xs1
y = ys1
else:
dc.DrawArc(xe2, ye2, x, y, centerX, centerY)
elif self._extrastyle & SM_DRAW_SECTORS:
dc.SetBrush(wx.Brush(colours[ii-1]))
# Here We Still Use The SM_DRAW_PARTIAL_FILLER Style, But We Are Not
# In The Sector Where The Current Value Resides
if self._extrastyle & SM_DRAW_PARTIAL_FILLER and ii != intersection:
if direction == "Advance":
dc.DrawArc(x, y, xold, yold, centerX, centerY)
else:
if ii < intersection:
dc.DrawArc(x, y, xold, yold, centerX, centerY)
# This Is The Case Where No SM_DRAW_PARTIAL_FILLER Has Been Chosen
else:
dc.DrawArc(x, y, xold, yold, centerX, centerY)
else:
if self._extrastyle & SM_DRAW_PARTIAL_FILLER and self._extrastyle & SM_DRAW_SECTORS:
dc.SetBrush(wx.Brush(fillercolour))
dc.DrawArc(xs2, ys2, xe2, ye2, centerX, centerY)
x, y = self.__CircleCoords(sectorradius, angle, centerX, centerY)
dc.SetBrush(wx.Brush(colours[ii]))
dc.DrawArc(xs1, ys1, xe1, ye1, centerX, centerY)
x = xs2
y = ys2
xold = x
yold = y
if self._extrastyle & SM_DRAW_PARTIAL_SECTORS:
sectorendradius = radius - 10.0*self.scale
sectorstartradius = radius
xps, yps = self.__CircleCoords(sectorstartradius, angle, centerX, centerY)
if ii > 0:
dc.SetBrush(wx.Brush(colours[ii-1]))
dc.DrawArc(xps, yps, xpsold, ypsold, centerX, centerY)
xpsold = xps
ypsold = yps
if self._extrastyle & SM_DRAW_PARTIAL_SECTORS:
xps1, yps1 = self.__CircleCoords(sectorendradius, -endangle+2*offset, centerX, centerY)
xps2, yps2 = self.__CircleCoords(sectorendradius, -startangle-2*offset, centerX, centerY)
dc.SetBrush(wx.Brush(speedbackground))
dc.DrawArc(xps1, yps1, xps2, yps2, centerX, centerY)
if self._extrastyle & SM_DRAW_GRADIENT:
dc.SetPen(wx.TRANSPARENT_PEN)
xcurrent, ycurrent = self.__CircleCoords(radius, accelangle, centerX, centerY)
# calculate gradient coefficients
col2 = self.GetSecondGradientColour()
col1 = self.GetFirstGradientColour()
r1, g1, b1 = int(col1.Red()), int(col1.Green()), int(col1.Blue())
r2, g2, b2 = int(col2.Red()), int(col2.Green()), int(col2.Blue())
flrect = float(radius+self.scale)
numsteps = 200
rstep = float((r2 - r1)) / numsteps
gstep = float((g2 - g1)) / numsteps
bstep = float((b2 - b1)) / numsteps
rf, gf, bf = 0, 0, 0
radiusteps = flrect/numsteps
interface = 0
for ind in range(numsteps+1):
currCol = (r1 + rf, g1 + gf, b1 + bf)
dc.SetBrush(wx.Brush(currCol))
gradradius = flrect - radiusteps*ind
xst1, yst1 = self.__CircleCoords(gradradius, -endangle, centerX, centerY)
xen1, yen1 = self.__CircleCoords(gradradius, -startangle-offset, centerX, centerY)
if self._extrastyle & SM_DRAW_PARTIAL_FILLER:
if gradradius >= fillerendradius:
if direction == "Advance":
dc.DrawArc(xstart, ystart, xcurrent, ycurrent, centerX, centerY)
else:
dc.DrawArc(xcurrent, ycurrent, xend, yend, centerX, centerY)
else:
if interface == 0:
interface = 1
myradius = fillerendradius + 1
xint1, yint1 = self.__CircleCoords(myradius, -endangle, centerX, centerY)
xint2, yint2 = self.__CircleCoords(myradius, -startangle-offset, centerX, centerY)
dc.DrawArc(xint1, yint1, xint2, yint2, centerX, centerY)
dc.DrawArc(xst1, yst1, xen1, yen1, centerX, centerY)
else:
if self._extrastyle & SM_DRAW_PARTIAL_SECTORS:
if gradradius <= sectorendradius:
if interface == 0:
interface = 1
myradius = sectorendradius + 1
xint1, yint1 = self.__CircleCoords(myradius, -endangle, centerX, centerY)
xint2, yint2 = self.__CircleCoords(myradius, -startangle-offset, centerX, centerY)
dc.DrawArc(xint1, yint1, xint2, yint2, centerX, centerY)
else:
dc.DrawArc(xst1, yst1, xen1, yen1, centerX, centerY)
else:
dc.DrawArc(xst1, yst1, xen1, yen1, centerX, centerY)
rf = rf + rstep
gf = gf + gstep
bf = bf + bstep
textheight = 0
# Get The Ticks And The Ticks Colour
ticks = self.GetTicks()[:]
tickscolour = self.GetTicksColour()
if direction == "Reverse":
ticks.reverse()
if self._extrastyle & SM_DRAW_SECONDARY_TICKS:
ticknum = self.GetNumberOfSecondaryTicks()
oldinterval = intervals[0]
dc.SetPen(wx.Pen(tickscolour, 1))
dc.SetBrush(wx.Brush(tickscolour))
dc.SetTextForeground(tickscolour)
# Get The Font For The Ticks
tfont, fontsize = self.GetTicksFont()
tfont = tfont[0]
myfamily = tfont.GetFamily()
fsize = self.scale*fontsize
tfont.SetPointSize(int(fsize))
tfont.SetFamily(myfamily)
dc.SetFont(tfont)
if self._extrastyle & SM_DRAW_FANCY_TICKS:
facename = tfont.GetFaceName()
ffamily = familyname[fontfamily.index(tfont.GetFamily())]
fweight = weightsname[weights.index(tfont.GetWeight())]
fstyle = stylesname[styles.index(tfont.GetStyle())]
fcolour = wx.TheColourDatabase.FindName(tickscolour)
textheight = 0
# Draw The Ticks And The Markers (Text Ticks)
for ii, angles in enumerate(textangles):
strings = ticks[ii]
if self._extrastyle & SM_DRAW_FANCY_TICKS == 0:
width, height, dummy, dummy = dc.GetFullTextExtent(strings, tfont)
textheight = height
else:
width, height, dummy = fancytext.GetFullExtent(strings, dc)
textheight = height
lX = dc.GetCharWidth()/2.0
lY = dc.GetCharHeight()/2.0
if self._extrastyle & SM_ROTATE_TEXT:
angis = colourangles[ii] - float(width)/(2.0*radius)
x, y = self.__CircleCoords(radius-10.0*self.scale, angis, centerX, centerY)
dc.DrawRotatedText(strings, x, y, angles)
else:
angis = colourangles[ii]
if self._extrastyle & SM_DRAW_FANCY_TICKS == 0:
x, y = self.__CircleCoords(radius-10*self.scale, angis, centerX, centerY)
lX = lX*len(strings)
x = x - lX - width*cos(angis)/2.0
y = y - lY - height*sin(angis)/2.0
if self._extrastyle & SM_DRAW_FANCY_TICKS:
fancystr = '<font family="' + ffamily + '" size="' + str(int(fsize)) + '" weight="' + fweight + '"'
fancystr = fancystr + ' color="' + fcolour + '"' + ' style="' + fstyle + '"> ' + strings + ' </font>'
width, height, dummy = fancytext.GetFullExtent(fancystr, dc)
x, y = self.__CircleCoords(radius-10*self.scale, angis, centerX, centerY)
x = x - width/2.0 - width*cos(angis)/2.0
y = y - height/2.0 - height*sin(angis)/2.0
fancytext.RenderToDC(fancystr, dc, x, y)
else:
dc.DrawText(strings, x, y)
# This Is The Small Rectangle --> Tick Mark
rectangle = colourangles[ii] + pi/2.0
sinrect = sin(rectangle)
cosrect = cos(rectangle)
x1 = xcoords[ii] - self.scale*cosrect
y1 = ycoords[ii] - self.scale*sinrect
x2 = x1 + 3*self.scale*cosrect
y2 = y1 + 3*self.scale*sinrect
x3 = x1 - 10*self.scale*sinrect
y3 = y1 + 10*self.scale*cosrect
x4 = x3 + 3*self.scale*cosrect
y4 = y3 + 3*self.scale*sinrect
points = [(x1, y1), (x2, y2), (x4, y4), (x3, y3)]
dc.DrawPolygon(points)
if self._extrastyle & SM_DRAW_SECONDARY_TICKS:
if ii > 0:
newinterval = intervals[ii]
oldinterval = intervals[ii-1]
spacing = (newinterval - oldinterval)/float(ticknum+1)
for tcount in xrange(ticknum):
if direction == "Advance":
oldinterval = (oldinterval + spacing) - start
stint = oldinterval
else:
oldinterval = start + (oldinterval + spacing)
stint = end - oldinterval
angle = (stint/float(span))*(startangle-endangle) - startangle
rectangle = angle + pi/2.0
sinrect = sin(rectangle)
cosrect = cos(rectangle)
xt, yt = self.__CircleCoords(radius, angle, centerX, centerY)
x1 = xt - self.scale*cosrect
y1 = yt - self.scale*sinrect
x2 = x1 + self.scale*cosrect
y2 = y1 + self.scale*sinrect
x3 = x1 - 6*self.scale*sinrect
y3 = y1 + 6*self.scale*cosrect
x4 = x3 + self.scale*cosrect
y4 = y3 + self.scale*sinrect
points = [(x1, y1), (x2, y2), (x4, y4), (x3, y3)]
dc.DrawPolygon(points)
oldinterval = newinterval
tfont.SetPointSize(fontsize)
tfont.SetFamily(myfamily)
self.SetTicksFont(tfont)
# Draw The External Arc
dc.SetBrush(wx.TRANSPARENT_BRUSH)
if self._drawarc and not self._drawfullarc:
dc.SetPen(wx.Pen(self.GetArcColour(), 2.0))
# If It's Not A Complete Circle, Draw The Connecting Lines And The Arc
if abs(abs(startangle - endangle) - 2*pi) > 1.0/180.0:
dc.DrawArc(xstart, ystart, xend, yend, centerX, centerY)
dc.DrawLine(xstart, ystart, centerX, centerY)
dc.DrawLine(xend, yend, centerX, centerY)
else:
# Draw A Circle, Is A 2*pi Extension Arc = Complete Circle
dc.DrawCircle(centerX, centerY, radius)
if self._drawfullarc:
dc.DrawCircle(centerX, centerY, radius)
# Here We Draw The Text In The Middle, Near The Start Of The Arrow (If Present)
# This Is Like The "Km/h" Or "mph" Text In The Cars
if self._extrastyle & SM_DRAW_MIDDLE_TEXT:
middlecolour = self.GetMiddleTextColour()
middletext = self.GetMiddleText()
middleangle = (startangle + endangle)/2.0
middlefont, middlesize = self.GetMiddleTextFont()
middlesize = self.scale*middlesize
middlefont.SetPointSize(int(middlesize))
dc.SetFont(middlefont)
mw, mh, dummy, dummy = dc.GetFullTextExtent(middletext, middlefont)
newx = centerX + 1.5*mw*cos(middleangle) - mw/2.0
newy = centerY - 1.5*mh*sin(middleangle) - mh/2.0
dc.SetTextForeground(middlecolour)
dc.DrawText(middletext, newx, newy)
# Here We Draw The Text In The Bottom
# This Is Like The "Km/h" Or "mph" Text In The Cars
if self._extrastyle & SM_DRAW_BOTTOM_TEXT:
bottomcolour = self.GetBottomTextColour()
bottomtext = self.GetBottomText()
# hack for two lines of text
if bottomtext.find("\n") != -1:
# we have a newline
foo = bottomtext.partition("\n")
bottomtext1 = foo[0]
bottomtext2 = foo[2]
bottomangle = (startangle + endangle)/2.0
bottomfont, bottomsize = self.GetBottomTextFont()
bottomsize = self.scale*bottomsize
bottomfont.SetPointSize(int(bottomsize))
dc.SetFont(bottomfont)
mw, mh, dummy, dummy = dc.GetFullTextExtent(bottomtext1, bottomfont)
newx = centerX + 1.5*mw*cos(bottomangle) - mw/2.0
newy = ystart
yoffset = mh + (mh * 2)
dc.SetTextForeground(bottomcolour)
dc.DrawText(bottomtext1, newx, newy)
mw, mh, dummy, dummy = dc.GetFullTextExtent(bottomtext2, bottomfont)
newx = centerX + 1.5*mw*cos(bottomangle) - mw/2.0
newy = ystart + yoffset
dc.SetTextForeground(bottomcolour)
dc.DrawText(bottomtext2, newx, newy)
else:
bottomangle = (startangle + endangle)/2.0
bottomfont, bottomsize = self.GetBottomTextFont()
bottomsize = self.scale*bottomsize
bottomfont.SetPointSize(int(bottomsize))
dc.SetFont(bottomfont)
mw, mh, dummy, dummy = dc.GetFullTextExtent(bottomtext, bottomfont)
newx = centerX + 1.5*mw*cos(bottomangle) - mw/2.0
newy = ystart
dc.SetTextForeground(bottomcolour)
dc.DrawText(bottomtext, newx, newy)
self.bottomTextBottom = (int)(newy + mh)
# Here We Draw The Icon In The Middle, Near The Start Of The Arrow (If Present)
# This Is Like The "Fuel" Icon In The Cars
if self._extrastyle & SM_DRAW_MIDDLE_ICON:
middleicon = self.GetMiddleIcon()
middlewidth, middleheight = self.__GetMiddleIconDimens()
middleicon.SetWidth(middlewidth*self.scale)
middleicon.SetHeight(middleheight*self.scale)
middleangle = (startangle + endangle)/2.0
mw = middleicon.GetWidth()
mh = middleicon.GetHeight()
newx = centerX + 1.5*mw*cos(middleangle) - mw/2.0
newy = centerY - 1.5*mh*sin(middleangle) - mh/2.0
dc.DrawIcon(middleicon, newx, newy)
# Restore Icon Dimension, If Not Something Strange Happens
middleicon.SetWidth(middlewidth)
middleicon.SetHeight(middleheight)
# Requested To Draw The Hand
if self._extrastyle & SM_DRAW_HAND:
handstyle = self.GetHandStyle()
handcolour = self.GetHandColour()
# Calculate The Data For The Hand
if textheight == 0:
maxradius = radius-10*self.scale
else:
maxradius = radius-5*self.scale-textheight
xarr, yarr = self.__CircleCoords(maxradius, accelangle, centerX, centerY)
if handstyle == "Arrow":
x1, y1 = self.__CircleCoords(maxradius, accelangle - 4.0/180, centerX, centerY)
x2, y2 = self.__CircleCoords(maxradius, accelangle + 4.0/180, centerX, centerY)
x3, y3 = self.__CircleCoords(maxradius+3*(abs(xarr-x1)), accelangle, centerX, centerY)
newx = centerX + 4*cos(accelangle)*self.scale
newy = centerY + 4*sin(accelangle)*self.scale
else:
x1 = centerX + 4*self.scale*sin(accelangle)
y1 = centerY - 4*self.scale*cos(accelangle)
x2 = xarr
y2 = yarr
x3 = centerX - 4*self.scale*sin(accelangle)
y3 = centerY + 4*self.scale*cos(accelangle)
x4, y4 = self.__CircleCoords(5*self.scale*sqrt(3), accelangle+pi, centerX, centerY)
if self._extrastyle & SM_DRAW_SHADOW:
if handstyle == "Arrow":
# Draw The Shadow
shadowcolour = self.GetShadowColour()
dc.SetPen(wx.Pen(shadowcolour, 5*log(self.scale+1)))
dc.SetBrush(wx.Brush(shadowcolour))
shadowdistance = 2.0*self.scale
dc.DrawLine(newx + shadowdistance, newy + shadowdistance,
xarr + shadowdistance, yarr + shadowdistance)
dc.DrawPolygon([(x1+shadowdistance, y1+shadowdistance),
(x2+shadowdistance, y2+shadowdistance),
(x3+shadowdistance, y3+shadowdistance)])
else:
# Draw The Shadow
shadowcolour = self.GetShadowColour()
dc.SetBrush(wx.Brush(shadowcolour))
dc.SetPen(wx.Pen(shadowcolour, 1.0))
shadowdistance = 1.5*self.scale
dc.DrawPolygon([(x1+shadowdistance, y1+shadowdistance),
(x2+shadowdistance, y2+shadowdistance),
(x3+shadowdistance, y3+shadowdistance),
(x4+shadowdistance, y4+shadowdistance)])
if handstyle == "Arrow":
dc.SetPen(wx.Pen(handcolour, 1.5))
# Draw The Small Circle In The Center --> The Hand "Holder"
dc.SetBrush(wx.Brush(speedbackground))
dc.DrawCircle(centerX, centerY, 4*self.scale)
dc.SetPen(wx.Pen(handcolour, 5*log(self.scale+1)))
# Draw The "Hand", An Arrow
dc.DrawLine(newx, newy, xarr, yarr)
# Draw The Arrow Pointer
dc.SetBrush(wx.Brush(handcolour))
dc.DrawPolygon([(x1, y1), (x2, y2), (x3, y3)])
else:
# Draw The Hand Pointer
dc.SetPen(wx.Pen(handcolour, 1.5))
dc.SetBrush(wx.Brush(handcolour))
dc.DrawPolygon([(x1, y1), (x2, y2), (x3, y3), (x4, y4)])
# Draw The Small Circle In The Center --> The Hand "Holder"
dc.SetBrush(wx.Brush(speedbackground))
dc.DrawCircle(centerX, centerY, 4*self.scale)
# here is where we draw the LEDNumberCtrl-style display at the bottom, if requested
if self._extrastyle & SM_DRAW_BOTTOM_LED:
self._DrawLED(dc, centerX)
dc.EndDrawing()
def SetIntervals(self, intervals=None):
"""
Sets The Intervals For SpeedMeter (Main Ticks Numeric Values).
@param intervals: list of the interval end points
@type intervals: L{list} of L{int}s or L{float}s, one marking the end of each interval
"""
if intervals is None:
intervals = [0, 50, 100]
self._intervals = intervals
def GetIntervals(self):
"""
Gets The Intervals For SpeedMeter.
@rtype: L{list} of L{int}s or L{float}s, one marking the end of each interval
"""
return self._intervals
def GetBottomTextBottom(self):
"""
Gets the Y position of the bottom of the BottomText.
Used to position the LEDNumberCtrl if one is present.
@return: Y position of the bottom of the BottomText on the BufferedWindow (DC)
@rtype: int
"""
return self.bottomTextBottom
def GetWidth(self):
"""
Gets the whole width of the SpeedMeter.
Used to position the LEDNumberCtrl if present.
@return: Width (px) of the whole faceBitmap
@rtype: int
"""
return self.faceBitmap.GetWidth()
def SetSpeedValue(self, value=None):
"""
Sets The Current Value For SpeedMeter.
Please also see L{SetValueMultiplier}() function.
The value MUST be within the range specified by the L{intervals} (see L{GetIntervals}).
Calling this function will trigger the L{UpdateDrawing}() method to redraw.
@param value: the desired value
@type value: L{int} or L{float}
"""
if value is None:
value = (max(self._intervals) - min(self._intervals))/2.0
else:
if not (isinstance(value, int) or isinstance(value, float)):
raise TypeError("value parameter of SetSpeedValue must be of int or float type, not " + str(type(value)))
if value < min(self._intervals):
raise IndexError("value parameter of SetSpeedValue is smaller than the minimum element in the points (intervals) list")
elif value > max(self._intervals):
raise IndexError("value parameter of SetSpeedValue Greater Than Maximum Element In Points List")
self._speedvalue = value
self._speedStr = str(int(value * self._ValueMultiplier))
try:
self.UpdateDrawing()
except:
pass
def GetSpeedValue(self):
"""
Gets The Current Value For SpeedMeter.
@rtype: L{int} or L{float}
"""
return self._speedvalue
def SetAngleRange(self, start=0, end=pi):
"""
Sets The Range Of Existence For SpeedMeter.
This Values *Must* Be Specifiend In RADIANS.
@param start: the start angle (default 0)
@type start: L{int} in radians
@param end: the end angle (default pi)
@type end: L{int} in radians
"""
self._anglerange = [start, end]
def GetAngleRange(self):
"""
Gets The Range Of Existence For SpeedMeter.
The Returned Values Are In RADIANS.
@rtype: L{list} of L{int}s (radians) like [start, end]
"""
return self._anglerange
def SetIntervalColours(self, colours=None):
"""
Sets The Colours For The Intervals.
Every Intervals (Circle Sector) Should Have A Colour.
Expects a list of L{wx.Colour}s of the same length as the number of intervals.
@param colours: list of colours to use for intervals
@type colours: L{list} of L{wx.Colour}s of same length as number of intervals
"""
if colours is None:
if not hasattr(self, "_anglerange"):
errstr = "\nERROR: Impossible To Set Interval Colours,"
errstr = errstr + " Please Define The Intervals Ranges Before."
raise errstr
return
colours = [wx.WHITE]*len(self._intervals)
else:
if len(colours) != len(self._intervals) - 1:
errstr = "\nERROR: Length Of Colour List Does Not Match Length"
errstr = errstr + " Of Intervals Ranges List."
print errstr
raise errstr
return
self._intervalcolours = colours
def GetIntervalColours(self):
"""
Gets The Colours For The Intervals.
@rtype: L{list} of L{wx.Colour}s
"""
if hasattr(self, "_intervalcolours"):
return self._intervalcolours
else:
raise "\nERROR: No Interval Colours Have Been Defined"
def SetTicks(self, ticks=None):
"""
Sets The Ticks For SpeedMeter Intervals (Main Ticks String Values).
Must be a list of strings, of the same length as the number of intervals.
This should probably not be called from outside the class, unless you want to set the interval ticks to something weird (maybe a fuel meter using "1/4", "1/2", etc.).
It is probably better to use the L{SetValueMultiplier}() function if you're dealing with linear integers.
@param ticks: list of strings, of the same length as the number of intervals.
@type ticks: L{list} of L{string}s
"""
if ticks is None:
if not hasattr(self, "_anglerange"):
errstr = "\nERROR: Impossible To Set Interval Ticks,"
errstr = errstr + " Please Define The Intervals Ranges Before."
raise errstr
return
ticks = []
for values in self._intervals:
ticks.append(str(values))
else:
if len(ticks) != len(self._intervals):
errstr = "\nERROR: Length Of Ticks List Does Not Match Length"
errstr = errstr + " Of Intervals Ranges List."
raise errstr
return
self._intervalticks = ticks
def GetTicks(self):
"""
Gets The Ticks For SpeedMeter Intervals (Main Ticks String Values).
@rtype: L{list} of L{string}s
"""
if hasattr(self, "_intervalticks"):
return self._intervalticks
else:
raise "\nERROR: No Interval Ticks Have Been Defined"
def SetTicksFont(self, font=None):
"""
Sets The Ticks Font.
@param font: the font for the text (default 10pt, wx.Font(1, wx.SWISS, wx.NORMAL, wx.BOLD, False))
@type font: L{wx.Font}
"""
if font is None:
self._originalfont = [wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD, False)]
self._originalsize = 10
else:
self._originalfont = [font]
self._originalsize = font.GetPointSize()
def GetTicksFont(self):
"""
Gets The Ticks Font.
@rtype: L{tuple} of (L{wx.Font}, L{float} size)
"""
return self._originalfont[:], self._originalsize
def SetTicksColour(self, colour=None):
"""
Sets The Ticks Colour.
@param colour
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.BLUE
self._tickscolour = colour
def GetTicksColour(self):
"""
Gets The Ticks Colour.
@rtype: L{wx.Colour}
"""
return self._tickscolour
def SetSpeedBackground(self, colour=None):
"""
Sets The Background Colour Outside The SpeedMeter Control.
@param colour
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.SystemSettings_GetColour(0)
self._speedbackground = colour
def GetSpeedBackground(self):
"""
Gets The Background Colour Outside The SpeedMeter Control.
@rtype: L{wx.Colour}
"""
return self._speedbackground
def SetHandColour(self, colour=None):
"""
Sets The Hand (Arrow Indicator) Colour.
@param colour
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.RED
self._handcolour = colour
def GetHandColour(self):
"""
Gets The Hand (Arrow Indicator) Colour.
@rtype: L{wx.Colour}
"""
return self._handcolour
def SetArcColour(self, colour=None):
"""
Sets The External Arc Colour (Thicker Line).
@param colour
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.BLACK
self._arccolour = colour
def GetArcColour(self):
"""
Gets The External Arc Colour.
@rtype: L{wx.Colour}
"""
return self._arccolour
def SetShadowColour(self, colour=None):
"""
Sets The Hand's Shadow Colour.
@param colour
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.Colour(150, 150, 150)
self._shadowcolour = colour
def GetShadowColour(self):
"""
Gets The Hand's Shadow Colour.
@rtype: L{wx.Colour}
"""
return self._shadowcolour
def SetFillerColour(self, colour=None):
"""
Sets The Partial Filler Colour.
A Circle Corona Near The Ticks Will Be Filled With This Colour, From
The Starting Value To The Current Value Of SpeedMeter.
@param colour: the colour
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.Colour(255, 150, 50)
self._fillercolour = colour
def GetFillerColour(self):
"""
Gets The Partial Filler Colour.
@rtype: L{wx.Colour}
"""
return self._fillercolour
def SetDirection(self, direction=None):
"""
Sets The Direction Of Advancing SpeedMeter Value.
Specifying "Advance" Will Move The Hand In Clock-Wise Direction (Like Normal
Car Speed Control), While Using "Reverse" Will Move It CounterClock-Wise
Direction.
@param direction: direction of needle movement
@type direction: L{string} "Advance" (default) or "Reverse"
"""
if direction is None:
direction = "Advance"
if direction not in ["Advance", "Reverse"]:
raise '\nERROR: Direction Parameter Should Be One Of "Advance" Or "Reverse".'
return
self._direction = direction
def GetDirection(self):
"""
Gets The Direction Of Advancing SpeedMeter Value.
@rtype: L{string} "Advance" or "Reverse"
"""
return self._direction
def SetNumberOfSecondaryTicks(self, ticknum=None):
"""
Sets The Number Of Secondary (Intermediate) Ticks.
@param ticknum: number of secondary ticks (MUST be >= 1, default is 3)
@type ticknum: L{int}
"""
if ticknum is None:
ticknum = 3
if ticknum < 1:
raise "\nERROR: Number Of Ticks Must Be Greater Than 1."
return
self._secondaryticks = ticknum
def GetNumberOfSecondaryTicks(self):
"""
Gets The Number Of Secondary (Intermediate) Ticks.
@rtype: L{int}
"""
return self._secondaryticks
def SetMiddleText(self, text=None):
"""
Sets The Text To Be Drawn Near The Center Of SpeedMeter.
@param text: the text to draw
@type text: L{string}
"""
if text is None:
text = ""
self._middletext = text
def GetMiddleText(self):
"""
Gets The Text To Be Drawn Near The Center Of SpeedMeter.
@rtype: L{string}
"""
return self._middletext
def SetMiddleTextFont(self, font=None):
"""
Sets The Font For The Text In The Middle.
@param font: the font for the text (default 10pt, wx.Font(1, wx.SWISS, wx.NORMAL, wx.BOLD, False))
@type font: L{wx.Font}
"""
if font is None:
self._middletextfont = wx.Font(1, wx.SWISS, wx.NORMAL, wx.BOLD, False)
self._middletextsize = 10.0
self._middletextfont.SetPointSize(self._middletextsize)
else:
self._middletextfont = font
self._middletextsize = font.GetPointSize()
self._middletextfont.SetPointSize(self._middletextsize)
def GetMiddleTextFont(self):
"""
Gets The Font For The Text In The Middle.
@rtype: L{tuple} of (L{wx.Font}, L{float} size)
"""
return self._middletextfont, self._middletextsize
def SetMiddleTextColour(self, colour=None):
"""
Sets The Colour For The Text In The Middle.
@param colour: the colour for the text
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.BLUE
self._middlecolour = colour
def GetMiddleTextColour(self):
"""
Gets The Colour For The Text In The Middle.
@rtype: L{wx.Colour}
"""
return self._middlecolour
def SetBottomText(self, text=None):
"""
Sets The Text To Be Drawn Near The Bottom Of SpeedMeter. Can have up to one newline. This should be used for a label, such as the gauge type and scale (i.e. "RPM x1000)
Newlines are understood. The text is drawn as two separate lines, and this is taken into account when positioning the LED digits if used.
@param text: the text to draw
@type text: L{string}
"""
if text is None:
text = ""
self._bottomtext = text
def GetBottomText(self):
"""
Gets The Text To Be Drawn Near The Bottom Of SpeedMeter (label)
@rtype: L{string}
"""
return self._bottomtext
def SetBottomTextFont(self, font=None):
"""
Sets The Font For The Text In The Bottom.
@param font: the font for the text (default 10pt, wx.Font(1, wx.SWISS, wx.NORMAL, wx.BOLD, False))
@type font: L{wx.Font}
"""
if font is None:
self._bottomtextfont = wx.Font(1, wx.SWISS, wx.NORMAL, wx.BOLD, False)
self._bottomtextsize = 10.0
self._bottomtextfont.SetPointSize(self._bottomtextsize)
else:
self._bottomtextfont = font
self._bottomtextsize = font.GetPointSize()
self._bottomtextfont.SetPointSize(self._bottomtextsize)
def GetBottomTextFont(self):
"""
Gets The Font For The Text In The Bottom.
@rtype: L{tuple} of (L{wx.Font}, L{float} size)
"""
return self._bottomtextfont, self._bottomtextsize
def SetBottomTextColour(self, colour=None):
"""
Sets The Colour For The Text In The Bottom of the gauge (label).
@param colour: the colour for the text
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.BLUE
self._bottomcolour = colour
def SetLEDColour(self, colour=None):
"""
Sets The Colour For Bottom LED digits.
@param colour: the colour for the digits
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.GREEN
self._ledcolour = colour
def GetLEDColour(self):
"""
Gets The Colour For The LED Digits
@rtype: L{wx.Colour}
"""
return self._ledcolour
def GetBottomTextColour(self):
"""
Gets The Colour For The Text In The Bottom
@rtype: L{wx.Colour}
"""
return self._bottomcolour
def SetMiddleIcon(self, icon):
"""
Sets The Icon To Be Drawn Near The Center Of SpeedMeter.
@param icon: The icon to be drawn
@type icon: L{wx.Icon}
"""
if icon.Ok():
self._middleicon = icon
else:
# edited 2010-06-13 by jantman to get rid of error - was raising an error as a string
print "\nERROR: Invalid Icon Passed To SpeedMeter."
return False
def GetMiddleIcon(self):
"""
Gets The Icon To Be Drawn Near The Center Of SpeedMeter.
@rtype: L{wx.Icon}
"""
return self._middleicon
def __GetMiddleIconDimens(self):
"""
USED INTERNALLY ONLY - Undocumented. Do NOT call from outside this class.
"""
return self._middleicon.GetWidth(), self._middleicon.GetHeight()
def __CircleCoords(self, radius, angle, centerX, centerY):
"""
USED INTERNALLY ONLY - Undocumented. Do NOT call from outside this class.
Method to get the coordinates of the circle.
"""
x = radius*cos(angle) + centerX
y = radius*sin(angle) + centerY
return x, y
def __GetIntersection(self, current, intervals):
"""
USED INTERNALLY ONLY - Undocumented. Do NOT call from outside this class.
"""
if self.GetDirection() == "Reverse":
interval = intervals[:]
interval.reverse()
else:
interval = intervals
indexes = range(len(intervals))
try:
intersection = [ind for ind in indexes if interval[ind] <= current <= interval[ind+1]]
except:
if self.GetDirection() == "Reverse":
intersection = [len(intervals) - 1]
else:
intersection = [0]
return intersection[0]
def SetFirstGradientColour(self, colour=None):
"""
Sets The First Gradient Colour (Near The Ticks).
@param colour: Color for the second gradient
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.Colour(145, 220, 200)
self._firstgradientcolour = colour
def GetFirstGradientColour(self):
"""
Gets The First Gradient Colour (Near The Ticks).
@return: first gradient color
@rtype: L{wx.Colour}
"""
return self._firstgradientcolour
def SetSecondGradientColour(self, colour=None):
"""
Sets The Second Gradient Colour (Near The Center).
@param colour: Color for the second gradient
@type colour: L{wx.Colour}
"""
if colour is None:
colour = wx.WHITE
self._secondgradientcolour = colour
def GetSecondGradientColour(self):
"""
Gets The First Gradient Colour (Near The Center).
@return: second gradient color
@rtype: L{wx.Colour}
"""
return self._secondgradientcolour
def SetHandStyle(self, style=None):
"""
Sets The Style For The Hand (Arrow Indicator).
By Specifying "Hand" SpeedMeter Will Draw A Polygon That Simulates The Car
Speed Control Indicator. Using "Arrow" Will Force SpeedMeter To Draw A
Simple Arrow.
@param style: hand style, string, either "Arrow" or "Hand"
@type style: L{string}
"""
if style is None:
style = "Hand"
if style not in ["Hand", "Arrow"]:
raise '\nERROR: Hand Style Parameter Should Be One Of "Hand" Or "Arrow".'
return
self._handstyle = style
def GetHandStyle(self):
"""
Gets The Style For The Hand (Arrow Indicator)
@return: hand style, string either "Arrow" or "Hand"
@rtype: L{string}
"""
return self._handstyle
def DrawExternalArc(self, draw=True):
"""
Specify Wheter Or Not You Wish To Draw The External (Thicker) Arc.
@param draw: Whether or not to draw the external arc.(default True)
@type draw: L{boolean}
"""
self._drawarc = draw
def DrawExternalCircle(self, draw=False):
"""
Specify Wheter Or Not You Wish To Draw The External (Thicker) Arc as a full circle.
@param draw: boolean, whether or not to draw the full circle (default False)
@type draw: L{boolean}
"""
self._drawfullarc = draw
def OnMouseMotion(self, event):
""" Handles The Mouse Events.
Here Only Left Clicks/Drags Are Involved. Should SpeedMeter Have Something More?
@todo: Do we even want this? What does it do? Seems like it would allow the user to change the value or something, which is BAD.
"""
mousex = event.GetX()
mousey = event.GetY()
if event.Leaving():
return
pos = self.GetClientSize()
size = self.GetPosition()
centerX = self.CenterX
centerY = self.CenterY
direction = self.GetDirection()
if event.LeftIsDown():
angle = atan2(float(mousey) - centerY, centerX - float(mousex)) + pi - self.EndAngle
if angle >= 2*pi:
angle = angle - 2*pi
if direction == "Advance":
currentvalue = (self.StartAngle - self.EndAngle - angle)*float(self.Span)/(self.StartAngle - self.EndAngle) + self.StartValue
else:
currentvalue = (angle)*float(self.Span)/(self.StartAngle - self.EndAngle) + self.StartValue
if currentvalue >= self.StartValue and currentvalue <= self.EndValue:
self.SetSpeedValue(currentvalue)
event.Skip()
def GetSpeedStyle(self):
""" Returns A List Of Strings And A List Of Integers Containing The Styles. """
stringstyle = []
integerstyle = []
if self._extrastyle & SM_ROTATE_TEXT:
stringstyle.append("SM_ROTATE_TEXT")
integerstyle.append(SM_ROTATE_TEXT)
if self._extrastyle & SM_DRAW_SECTORS:
stringstyle.append("SM_DRAW_SECTORS")
integerstyle.append(SM_DRAW_SECTORS)
if self._extrastyle & SM_DRAW_PARTIAL_SECTORS:
stringstyle.append("SM_DRAW_PARTIAL_SECTORS")
integerstyle.append(SM_DRAW_PARTIAL_SECTORS)
if self._extrastyle & SM_DRAW_HAND:
stringstyle.append("SM_DRAW_HAND")
integerstyle.append(SM_DRAW_HAND)
if self._extrastyle & SM_DRAW_SHADOW:
stringstyle.append("SM_DRAW_SHADOW")
integerstyle.append(SM_DRAW_SHADOW)
if self._extrastyle & SM_DRAW_PARTIAL_FILLER:
stringstyle.append("SM_DRAW_PARTIAL_FILLER")
integerstyle.append(SM_DRAW_PARTIAL_FILLER)
if self._extrastyle & SM_DRAW_SECONDARY_TICKS:
stringstyle.append("SM_DRAW_SECONDARY_TICKS")
integerstyle.append(SM_DRAW_SECONDARY_TICKS)
if self._extrastyle & SM_DRAW_MIDDLE_TEXT:
stringstyle.append("SM_DRAW_MIDDLE_TEXT")
integerstyle.append(SM_DRAW_MIDDLE_TEXT)
if self._extrastyle & SM_DRAW_BOTTOM_TEXT:
stringstyle.append("SM_DRAW_BOTTOM_TEXT")
integerstyle.append(SM_DRAW_BOTTOM_TEXT)
if self._extrastyle & SM_DRAW_BOTTOM_LED:
stringstyle.append("SM_DRAW_BOTTOM_LED")
integerstyle.append(SM_DRAW_BOTTOM_LED)
if self._extrastyle & SM_DRAW_MIDDLE_ICON:
stringstyle.append("SM_DRAW_MIDDLE_ICON")
integerstyle.append(SM_DRAW_MIDDLE_ICON)
if self._extrastyle & SM_DRAW_GRADIENT:
stringstyle.append("SM_DRAW_GRADIENT")
integerstyle.append(SM_DRAW_GRADIENT)
if self._extrastyle & SM_DRAW_FANCY_TICKS:
stringstyle.append("SM_DRAW_FANCY_TICKS")
integerstyle.append(SM_DRAW_FANCY_TICKS)
return stringstyle, integerstyle
# below here is stuff added by jantman for the LED control
def SetDrawFaded(self, DrawFaded=None, Redraw=False):
"""
Set the option to draw the faded (non-used) LED segments.
@param DrawFaded: Whether or not to draw the unused segments.
@type DrawFaded: L{boolean}
@param Redraw: Whether or not to redraw NOW.
@type Redraw: L{boolean}
"""
if DrawFaded is None:
self._DrawFaded = DrawFaded
if DrawFaded != self._DrawFaded:
self._DrawFaded = DrawFaded
if Redraw:
Refresh(False)
def _InitLEDInternals(self):
"""
Sets up the class variables for the LED control stuff.
Should ONLY be called INTERNALLY.
"""
self._LineMargin = None
self._LineLength = None
self._LineWidth = None
self._DigitMargin = None
self._LeftStartPos = None
def _DrawLED(self, dc, CenterX):
"""
Handles all of the drawing for the LED control, just an extension to the original SpeedMeter Draw() method.
Should ONLY be called INTERNALLY.
@todo: this is hard coded to ignore the background - doesn't draw it. If you want something different, you need to change it.
@param dc: the DC
@type dc: L{dc}
@param CenterX: The X coordinate of the center of the gauge, as found in the original SpeedMeter code.
@type CenterX: L{int}
"""
self._RecalcInternals()
# Iterate each digit in the value, and draw.
if self.DEBUG is True:
print "===Drawing LED Value String: " + self._speedStr
for i in range(len(self._speedStr)):
c = self._speedStr[i]
if self.DEBUG:
print "Digit Number: " + str(i)
print "Drawing Digit: " + c
# Draw faded lines if wanted.
if self._DrawFaded and (c != '.'):
self._DrawDigit(dc, DIGITALL, i)
# Draw the digits.
if c == '0':
self._DrawDigit(dc, DIGIT0, i)
elif c == '1':
self._DrawDigit(dc, DIGIT1, i)
elif c == '2':
self._DrawDigit(dc, DIGIT2, i)
elif c == '3':
self._DrawDigit(dc, DIGIT3, i)
elif c == '4':
self._DrawDigit(dc, DIGIT4, i)
elif c == '5':
self._DrawDigit(dc, DIGIT5, i)
elif c == '6':
self._DrawDigit(dc, DIGIT6, i)
elif c == '7':
self._DrawDigit(dc, DIGIT7, i)
elif c == '8':
self._DrawDigit(dc, DIGIT8, i)
elif c == '9':
self._DrawDigit(dc, DIGIT9, i)
elif c == '-':
self._DrawDigit(dc, DASH, i)
elif c == '.':
self._DrawDigit(dc, DECIMALSIGN, (i-1))
elif c == ' ':
# skip this
pass
else:
print "Error: Undefined Digit Value: " + c
def _DrawDigit(self, dc, Digit, Column):
"""
Internal code to actually draw the lines that make up a single digit.
Should be called INTERNALLY ONLY.
@param dc: The DC.
@type dc: L{dc}
@param Digit: The constant (mask) defining the lines of the specified digit.
@type Digit: L{int}
@param Column: the number of the column that the digit should be in
@type Column: L{int}
"""
LineColor = self.GetForegroundColour()
if Digit == DIGITALL:
R = LineColor.Red() / 16
G = LineColor.Green() / 16
B = LineColor.Blue() / 16
LineColor = wx.Colour(R, G, B)
XPos = self._LeftStartPos + (Column * (self._LineLength + self._DigitMargin))
# Create a pen and draw the lines.
Pen = wx.Pen(LineColor, self._LineWidth, wx.SOLID)
dc.SetPen(Pen)
if Digit & LINE1:
dc.DrawLine(XPos + self._LineMargin*2, self._LineMargin + self.LEDyOffset,
XPos + self._LineLength + self._LineMargin*2, self._LineMargin + self.LEDyOffset)
if self.DEBUG:
print "Line1"
if Digit & LINE2:
dc.DrawLine(XPos + self._LineLength + self._LineMargin*3,
self._LineMargin*2 + self.LEDyOffset, XPos + self._LineLength + self._LineMargin*3,
self._LineLength + (self._LineMargin*2) + self.LEDyOffset)
if self.DEBUG:
print "Line2"
if Digit & LINE3:
dc.DrawLine(XPos + self._LineLength + self._LineMargin*3, self._LineLength + (self._LineMargin*4) + self.LEDyOffset,
XPos + self._LineLength + self._LineMargin*3, self._LineLength*2 + (self._LineMargin*4) + self.LEDyOffset)
if self.DEBUG:
print "Line3"
if Digit & LINE4:
dc.DrawLine(XPos + self._LineMargin*2, self._LineLength*2 + (self._LineMargin*5) + self.LEDyOffset,
XPos + self._LineLength + self._LineMargin*2, self._LineLength*2 + (self._LineMargin*5) + self.LEDyOffset)
if self.DEBUG:
print "Line4"
if Digit & LINE5:
dc.DrawLine(XPos + self._LineMargin, self._LineLength + (self._LineMargin*4) + self.LEDyOffset,
XPos + self._LineMargin, self._LineLength*2 + (self._LineMargin*4) + self.LEDyOffset)
if self.DEBUG:
print "Line5"
if Digit & LINE6:
dc.DrawLine(XPos + self._LineMargin, self._LineMargin*2 + self.LEDyOffset,
XPos + self._LineMargin, self._LineLength + (self._LineMargin*2) + self.LEDyOffset)
if self.DEBUG:
print "Line6"
if Digit & LINE7:
dc.DrawLine(XPos + self._LineMargin*2, self._LineLength + (self._LineMargin*3) + self.LEDyOffset,
XPos + self._LineMargin*2 + self._LineLength, self._LineLength + (self._LineMargin*3) + self.LEDyOffset)
if self.DEBUG:
print "Line7"
if Digit & DECIMALSIGN:
dc.DrawLine(XPos + self._LineLength + self._LineMargin*4, self._LineLength*2 + (self._LineMargin*5) + self.LEDyOffset,
XPos + self._LineLength + self._LineMargin*4, self._LineLength*2 + (self._LineMargin*5) + self.LEDyOffset)
if self.DEBUG:
print "Line DecimalSign"
#Dc.SetPen(wxNullPen);
def _RecalcInternals(self):
"""
Recalculates all variables controlling the placement and gemoetry of the digits. Bases it off of the Frame size. This should calculate everything like the gauge center and work off of that.
Should be called INTERNALLY ONLY.
Dimensions of LED segments
Size of character is based on the HEIGH of the widget, NOT the width.
Segment height is calculated as follows:
Each segment is m_LineLength pixels long.
There is m_LineMargin pixels at the top and bottom of each line segment
There is m_LineMargin pixels at the top and bottom of each digit
Therefore, the heigth of each character is:
m_LineMargin : Top digit boarder
m_LineMargin+m_LineLength+m_LineMargin : Top half of segment
m_LineMargin+m_LineLength+m_LineMargin : Bottom half of segment
m_LineMargin : Bottom digit boarder
----------------------
m_LineMargin*6 + m_LineLength*2 == Total height of digit.
Therefore, (m_LineMargin*6 + m_LineLength*2) must equal Height
Spacing between characters can then be calculated as follows:
m_LineMargin : before the digit,
m_LineMargin+m_LineLength+m_LineMargin : for the digit width
m_LineMargin : after the digit
= m_LineMargin*4 + m_LineLength
"""
# the size params for just the LED area itself
size = self.GetClientSize()
LEDHeight = int(size.y / 7) # based off of height of 30 in a 214px high client
Height = LEDHeight
LEDWidth = int(size.x / 2.4) # based off of width of 120 in a 290px wide client
ClientWidth = size.x
self.LEDyOffset = self.bottomTextBottom
if (Height * 0.075) < 1:
self._LineMargin = 1
else:
self._LineMargin = int(Height * 0.075)
if (Height * 0.275) < 1:
self._LineLength = 1
else:
self._LineLength = int(Height * 0.275)
self._LineWidth = self._LineMargin
self._DigitMargin = self._LineMargin * 4
# Count the number of characters in the string; '.' characters are not
# included because they do not take up space in the display
count = 0;
for char in self._speedStr:
if char != '.':
count = count + 1
ValueWidth = (self._LineLength + self._DigitMargin) * count
if self._Alignment == gizmos.LED_ALIGN_LEFT:
self._LeftStartPos = self._LineMargin + LeftEdge
elif self._Alignment == gizmos.LED_ALIGN_RIGHT:
self._LeftStartPos = ClientWidth - ValueWidth - self._LineMargin + LeftEdge
else:
# self._Alignment == gizmos.LED_ALIGN_CENTER:
# centered is the default
self._LeftStartPos = (ClientWidth /2 ) - (ValueWidth / 2)
def SetLEDAlignment(self, Alignment=None, Redraw=False):
"""
Sets LED digit alignment.
@param Alignment - the alignment of the LED digits - valid values are L{gizmos.LED_ALIGN_LEFT}, L{gizmos.LED_ALIGN_RIGHT}, L{gizmos.LED_ALIGN_CENTER} (center is default).
@type Alignment: wxLEDValueAlign
@param Redraw: Whether or not to redraw NOW.
@type Redraw: L{boolean}
"""
if Alignment is None:
self._Alignment = Alignment
if Alignment != self._Alignment:
self._Alignment = Alignment
if Redraw:
try:
self.UpdateDrawing()
except:
pass
def SetDrawFaded(self, DrawFaded=None, Redraw=False):
"""
Whether or not to draw the unused line segments. If true, draws them faded.
@param DrawFaded: Whether or not to draw the faded segments. (Default False)
@type DrawFaded: L{boolean}
@param Redraw: Whether or not to redraw NOW.
@type Redraw: L{boolean}
"""
if DrawFaded is None:
self._DrawFaded = DrawFaded
if DrawFaded != self._DrawFaded:
self._DrawFaded = DrawFaded
if Redraw:
Refresh(False)
def SetValueMultiplier(self, multiplier=1):
"""
Sets the value multiplier. Values set with SetValue() will be multiplied by this amount before being displayed on the LED control.
@param multiplier: the value multiplier
@type multiplier: L{int} or L{float}
@todo: re-do all this by setting a ValueScale (maybe at create time) and using this scale to determine the gauge scale, also divide values by it before feeding into the meter code itself (i.e. LED will show value as passed with SetValue()).
"""
self._ValueMultiplier = multiplier
| jantman/python-obd | SpeedMeter.py | Python | gpl-3.0 | 81,844 |
#!/usr/bin/env python
# Copyright (C) 2014-2017 Shea G Craig
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""misc_endpoints.py
Classes representing API endpoints that don't subclass JSSObject
"""
from __future__ import print_function
from __future__ import absolute_import
import mimetypes
import os
import sys
from xml.etree import ElementTree
from .exceptions import MethodNotAllowedError, PostError
from .tools import error_handler
__all__ = ('CommandFlush', 'FileUpload', 'LogFlush')
# Map Python 2 basestring type for Python 3.
if sys.version_info.major == 3:
basestring = str
# pylint: disable=missing-docstring
# pylint: disable=too-few-public-methods
class CommandFlush(object):
_endpoint_path = "commandflush"
can_get = False
can_put = False
can_post = False
def __init__(self, jss):
"""Initialize a new CommandFlush
Args:
jss: JSS object.
"""
self.jss = jss
@property
def url(self):
"""Return the path subcomponent of the url to this object."""
return self._endpoint_path
def command_flush_with_xml(self, data):
"""Flush commands for devices with a supplied xml string.
From the Casper API docs:
Status and devices specified in an XML file. Id lists may be
specified for <computers>, <computer_groups>, <mobile_devices>,
<mobile_device_groups>. Sample file:
<commandflush>
<status>Pending+Failed</status>
<mobile_devices>
<mobile_device>
<id>1</id>
</mobile_device>
<mobile_device>
<id>2</id>
</mobile_device>
</mobile_devices>
</commandflush>
Args:
data (string): XML string following the above structure or
an ElementTree/Element.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not isinstance(data, basestring):
data = ElementTree.tostring(data, encoding='UTF-8')
self.jss.delete(self.url, data)
def command_flush_for(self, id_type, command_id, status):
"""Flush commands for an individual device.
Args:
id_type (str): One of 'computers', 'computergroups',
'mobiledevices', or 'mobiledevicegroups'.
id_value (str, int, list): ID value(s) for the devices to
flush. More than one device should be passed as IDs
in a list or tuple.
status (str): One of 'Pending', 'Failed', 'Pending+Failed'.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
id_types = ('computers', 'computergroups', 'mobiledevices',
'mobiledevicegroups')
status_types = ('Pending', 'Failed', 'Pending+Failed')
if id_type not in id_types or status not in status_types:
raise ValueError("Invalid arguments.")
if isinstance(command_id, list):
command_id = ",".join(str(item) for item in command_id)
flush_url = "{}/{}/id/{}/status/{}".format(
self.url, id_type, command_id, status)
self.jss.delete(flush_url)
# pylint: disable=too-few-public-methods
class FileUpload(object):
"""FileUploads are a special case in the API. They allow you to add
file resources to a number of objects on the JSS.
To use, instantiate a new FileUpload object, then use the save()
method to upload.
Once the upload has been posted you may only interact with it
through the web interface. You cannot list/get it or delete it
through the API.
However, you can reuse the FileUpload object if you wish, by
changing the parameters, and issuing another save().
"""
_endpoint_path = "fileuploads"
allowed_kwargs = ('subset',)
def __init__(self, j, resource_type, id_type, _id, resource):
"""Prepare a new FileUpload.
Args:
j: A JSS object to POST the upload to.
resource_type:
String. Acceptable Values:
Attachments:
computers
mobiledevices
enrollmentprofiles
peripherals
mobiledeviceenrollmentprofiles
Icons:
policies
ebooks
mobiledeviceapplicationsicon
Mobile Device Application:
mobiledeviceapplicationsipa
Disk Encryption
diskencryptionconfigurations
diskencryptions (synonymous)
PPD
printers
id_type:
String of desired ID type:
id
name
_id: Int or String referencing the identity value of the
resource to add the FileUpload to.
resource: String path to the file to upload.
"""
resource_types = ["computers", "mobiledevices", "enrollmentprofiles",
"peripherals", "mobiledeviceenrollmentprofiles",
"policies", "ebooks", "mobiledeviceapplicationsicon",
"mobiledeviceapplicationsipa",
"diskencryptionconfigurations", "printers"]
id_types = ["id", "name"]
self.jss = j
# Do some basic error checking on parameters.
if resource_type in resource_types:
self.resource_type = resource_type
else:
raise TypeError(
"resource_type must be one of: %s" % ', '.join(resource_types))
if id_type in id_types:
self.id_type = id_type
else:
raise TypeError("id_type must be one of: %s" % ', '.join(id_types))
self._id = str(_id)
basename = os.path.basename(resource)
content_type = mimetypes.guess_type(basename)[0]
self.resource = {"name": (basename, open(resource, "rb"),
content_type)}
self._set_upload_url()
def _set_upload_url(self):
"""Generate the full URL for a POST."""
# pylint: disable=protected-access
self._upload_url = "/".join([
self.jss._url, self._endpoint_path, self.resource_type,
self.id_type, str(self._id)])
# pylint: enable=protected-access
def save(self):
"""POST the object to the JSS."""
try:
response = self.jss.session.post(
self._upload_url, files=self.resource)
except PostError as error:
if error.status_code == 409:
raise PostError(error)
else:
raise MethodNotAllowedError(self.__class__.__name__)
if response.status_code == 201:
if self.jss.verbose:
print("POST: Success")
print(response.content)
elif response.status_code >= 400:
error_handler(PostError, response)
class LogFlush(object):
_endpoint_path = "logflush"
def __init__(self, jss):
"""Initialize a new LogFlush
Args:
jss: JSS object.
"""
self.jss = jss
@property
def url(self):
"""Return the path subcomponent of the url to this object."""
return self._endpoint_path
def log_flush_with_xml(self, data):
"""Flush logs for devices with a supplied xml string.
From the Casper API docs:
log, log_id, interval, and devices specified in an XML file.
Sample file:
<logflush>
<log>policy</log>
<log_id>2</log_id>
<interval>THREE MONTHS</interval>
<computers>
<computer>
<id>1</id>
</computer>
<computer>
<id>2</id>
</computer>
</computers>
</logflush>
Args:
data (string): XML string following the above structure or
an ElementTree/Element.
Elements:
logflush (root)
log (Unknown; "policy" is the only one listed in
docs).
log_id: Log ID value.
interval: Combination of "Zero", "One", "Two",
"Three", "Six", and "Day", "Week", "Month",
"Year". e.g. ("Three+Months")
Please note: The documentation for this
specifies the singular form (e.g. "Month"),
and plural ("Months") at different times, and
further the construction is listed as
"THREE MONTHS" elsewhere. Limited testing
indicates that pluralization does not matter,
nor does capitalization. The "+" seems optional
as well.
Please test!
Device Arrays:
Again, acceptable values are not listed in the
docs, aside from the example ("computers").
Presumably "mobiledevices", and possibly
"computergroups" and "mobiledevicegroups" work.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not isinstance(data, basestring):
data = ElementTree.tostring(data, encoding='UTF-8')
self.jss.delete(self.url, data)
def log_flush_for_interval(self, log_type, interval):
"""Flush logs for an interval of time.
Args:
log_type (str): Only documented type is "policies". This
will be applied by default if nothing is passed.
interval (str): Combination of "Zero", "One", "Two",
"Three", "Six", and "Day", "Week", "Month", "Year". e.g.
("Three+Months") Please note: The documentation for this
specifies the singular form (e.g. "Month"), and plural
("Months") at different times, and further the
construction is listed as "THREE MONTHS" elsewhere.
Limited testing indicates that pluralization does not
matter, nor does capitalization.
Please test!
No validation is performed on this prior to the request
being made.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not log_type:
log_type = "policies"
# The XML for the /logflush basic endpoint allows spaces
# instead of "+", so do a replace here just in case.
interval = interval.replace(" ", "+")
flush_url = "{}/{}/interval/{}".format(
self.url, log_type, interval)
self.jss.delete(flush_url)
def log_flush_for_obj_for_interval(self, log_type, obj_id, interval):
"""Flush logs for an interval of time for a specific object.
Please note, log_type is a variable according to the API docs,
but acceptable values are not listed. Only "policies" is
demonstrated as an acceptable value.
Args:
log_type (str): Only documented type is "policies". This
will be applied by default if nothing is passed.
obj_id (str or int): ID of the object to have logs flushed.
interval (str): Combination of "Zero", "One", "Two",
"Three", "Six", and "Day", "Week", "Month", "Year". e.g.
("Three+Months") Please note: The documentation for this
specifies the singular form (e.g. "Month"), and plural
("Months") at different times, and further the
construction is listed as "THREE MONTHS" elsewhere.
Limited testing indicates that pluralization does not
matter, nor does capitalization.
Please test!
No validation is performed on this prior to the request
being made.
Raises:
DeleteError if provided url_path has a >= 400 response.
"""
if not log_type:
log_type = "policies"
# The XML for the /logflush basic endpoint allows spaces
# instead of "+", so do a replace here just in case.
interval = interval.replace(" ", "+")
flush_url = "{}/{}/id/{}/interval/{}".format(
self.url, log_type, obj_id, interval)
self.jss.delete(flush_url)
# pylint: enable=missing-docstring
# pylint: enable=too-few-public-methods
| sheagcraig/python-jss | jss/misc_endpoints.py | Python | gpl-3.0 | 13,525 |
#!/usr/bin/env python
"""
Copyright (C) 2015 Louis Dijkstra
This file is part of error-model-aligner
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function, division
from optparse import OptionParser
import os
import sys
from scipy.stats import norm
import numpy as np
from scipy.optimize import minimize
import math
__author__ = "Louis Dijkstra"
usage = """%prog [options] <.insert-sizes>
<.insert-sizes> File containing the insert size observations
when there is no indel
Outputs the mean and standard deviation of the null model (i.e., a discrete
approximation of a Normal distribution that does not allow for negative values)
The file .insert-sizes must be orginazed in two columns (tab seperated):
x_1 c_1
x_2 c_2
... ...
x_n c_n
where x_1 is the minimal insert size observed x_n is the maximum value found. (Note: x_{i+1} = x_i + 1).
c_i is the count for x_i.
"""
def normalizationFactor(mu, sigma):
"""Returns the normalization factor given mean mu and STD sigma"""
return 1.0 / (1.0 - norm.cdf((-mu - 0.5)/sigma))
def f(isize, mu, sigma):
p = norm.cdf((isize + 0.5 - mu)/sigma) - norm.cdf((isize - 0.5 - mu)/sigma)
if p < sys.float_info.min:
return sys.float_info.min
return p
def loglikelihood(mu, sigma, isizes, counts, n):
"""Returns the loglikelihood of mu and sigma given the data (isizes, counts and n)"""
l = n * math.log(normalizationFactor(mu, sigma))
for isize, count in zip(isizes, counts):
l += count * math.log(f(isize, mu, sigma))
return l
def aux_loglikelihood(var, isizes, counts, n):
mu = var[0]
sigma = var[1]
return -1.0 * loglikelihood(mu, sigma, isizes, counts, n)
def main():
parser = OptionParser(usage=usage)
parser.add_option("-f", action="store", dest="maxfun", default=1000, type=int,
help="Maximum number of function evaluations (Default = 1000) ")
parser.add_option("-i", action="store", dest="maxiter", default=100, type=int,
help="Maximum number of iterations (Default = 100) ")
parser.add_option("-m", action="store", dest="mu_init", default=100.0, type=float,
help="Initial guess for the mean (mu). (Default is 100) ")
parser.add_option("-s", action="store", dest="sigma_init", default=10.0, type=float,
help="Initial guess for the standard deviation (sigma). (Default is 10) ")
parser.add_option("-v", action="store_true", dest="verbose", default=False,
help="Verbose. Output of the optimizer is printed. ")
(options, args) = parser.parse_args()
if (len(args)!=1):
parser.print_help()
return 1
isizes = [] # insert sizes that were observed
counts = [] # number of times these insert sizes were observed
for line in open(args[0], 'r'):
values = map(int, line.split())
isizes.append(values[0])
counts.append(values[1])
isizes = np.array(isizes)
counts = np.array(counts)
n = np.sum(counts)
res = minimize ( aux_loglikelihood,
[options.mu_init, options.sigma_init],
args=[isizes, counts, n],
method="L-BFGS-B",
bounds=[(0, None), (0, None)],
options={'disp': options.verbose, 'maxfun': options.maxfun, 'maxiter': options.maxiter})
print("\n*** RESULTS ***\n")
print("estimated mean: %lf\t estimated STD: %lf\n"%(res.x[0], res.x[1]))
print(res.message)
if __name__ == '__main__':
sys.exit(main())
| louisdijkstra/error-model-aligner | bin/estimate-null-insert-sizes.py | Python | gpl-3.0 | 3,976 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
class FirefoxWhatsNew73Page(BasePage):
URL_TEMPLATE = '/{locale}/firefox/73.0/whatsnew/all/{params}'
_set_default_button_locator = (By.ID, 'set-as-default-button')
@property
def is_default_browser_button_displayed(self):
return self.is_element_displayed(*self._set_default_button_locator)
| ericawright/bedrock | tests/pages/firefox/whatsnew/whatsnew_73.py | Python | mpl-2.0 | 595 |
# flake8: noqa
from bedrock.mozorg.templatetags import misc, social_widgets
| sgarrity/bedrock | bedrock/mozorg/templatetags/__init__.py | Python | mpl-2.0 | 76 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import time
import json
from contextlib import nested
import mock
from nose.tools import eq_, ok_, assert_raises
from configman import ConfigurationManager
from socorro.external.hbase import hbase_client
from socorro.external.crashstorage_base import (
CrashIDNotFound,
Redactor,
MemoryDumpsMapping
)
from socorro.external.hbase.crashstorage import HBaseCrashStorage
from socorro.external.hbase.connection_context import \
HBaseConnectionContextPooled
from socorro.lib.util import DotDict
from socorro.unittest.config import commonconfig
from socorro.database.transaction_executor import (
TransactionExecutorWithLimitedBackoff
)
from socorro.unittest.testbase import TestCase
class SomeThriftError(Exception):
pass
_run_integration_tests = os.environ.get('RUN_HBASE_INTEGRATION_TESTS', False)
if _run_integration_tests in ('false', 'False', 'no', '0'):
_run_integration_tests = False
if not _run_integration_tests:
import logging
logging.warning("Skipping HBase integration tests")
else:
class TestIntegrationHBaseCrashStorage(TestCase):
"""
If you ever get this::
Traceback (most recent call last):
...
socorro.external.hbase.hbase_client.FatalException: the connection
is not viable. retries fail:
Then try the following:
/etc/init.d/hadoop-hbase-master restart
/etc/init.d/hadoop-hbase-thrift restart
Also, you can look in /var/log/hbase for clues.
Still not working, try:
hbase shell
> describe 'crash_reports'
and keep an eye on the logs.
"""
def tearDown(self):
super(TestIntegrationHBaseCrashStorage, self).tearDown()
self._truncate_hbase_table()
def _truncate_hbase_table(self):
connection = hbase_client.HBaseConnectionForCrashReports(
commonconfig.hbaseHost.default,
commonconfig.hbasePort.default,
100
)
for row in connection.merge_scan_with_prefix(
'crash_reports', '', ['ids:ooid']):
index_row_key = row['_rowkey']
connection.client.deleteAllRow(
'crash_reports', index_row_key)
# because of HBase's async nature, deleting can take time
list(connection.iterator_for_all_legacy_to_be_processed())
def test_basic_hbase_crashstorage(self):
mock_logging = mock.Mock()
required_config = HBaseCrashStorage.required_config
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'hbase_timeout': 100,
'hbase_host': commonconfig.hbaseHost.default,
'hbase_port': commonconfig.hbasePort.default,
}],
argv_source=[]
)
with config_manager.context() as config:
crashstorage = HBaseCrashStorage(config)
eq_(list(crashstorage.new_crashes()), [])
crash_id = '86b58ff2-9708-487d-bfc4-9dac32121214'
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
fake_raw_dump_1 = 'peter is a swede'
fake_raw_dump_2 = 'lars is a norseman'
fake_raw_dump_3 = 'adrian is a frenchman'
fake_dumps = MemoryDumpsMapping({
'upload_file_minidump': fake_raw_dump_1,
'lars': fake_raw_dump_2,
'adrian': fake_raw_dump_3
})
crashstorage.save_raw_crash(json.loads(raw),
fake_dumps,
crash_id)
assert config.logger.info.called
assert config.logger.info.call_count > 1
msg_tmpl, msg_arg = config.logger.info.call_args_list[1][0]
# ie logging.info(<template>, <arg>)
msg = msg_tmpl % msg_arg
ok_('saved' in msg)
ok_(crash_id in msg)
raw_crash = crashstorage.get_raw_crash(crash_id)
assert isinstance(raw_crash, dict)
eq_(raw_crash['name'], 'Peter')
dump = crashstorage.get_raw_dump(crash_id)
assert isinstance(dump, basestring)
ok_('peter is a swede' in dump)
dumps = crashstorage.get_raw_dumps(crash_id)
assert isinstance(dumps, dict)
ok_('upload_file_minidump' in dumps)
ok_('lars' in dumps)
ok_('adrian' in dumps)
eq_(dumps['upload_file_minidump'],
fake_dumps['upload_file_minidump'])
eq_(dumps['lars'],
fake_dumps['lars'])
eq_(dumps['adrian'],
fake_dumps['adrian'])
# hasn't been processed yet
assert_raises(CrashIDNotFound,
crashstorage.get_processed,
crash_id)
pro = ('{"name":"Peter",'
'"uuid":"86b58ff2-9708-487d-bfc4-9dac32121214", '
'"submitted_timestamp":"%d", '
'"completeddatetime": "%d"}' %
(time.time(), time.time()))
crashstorage.save_processed(json.loads(pro))
data = crashstorage.get_processed(crash_id)
eq_(data['name'], u'Peter')
hb_connection = crashstorage.hbaseConnectionPool.connection()
ok_(hb_connection.transport.isOpen())
crashstorage.close()
ok_(not hb_connection.transport.isOpen())
class TestHBaseCrashStorage(TestCase):
def test_hbase_crashstorage_basic_error(self):
mock_logging = mock.Mock()
required_config = HBaseCrashStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'hbase_timeout': 100,
'hbase_host': commonconfig.hbaseHost.default,
'hbase_port': commonconfig.hbasePort.default,
}],
argv_source=[]
)
with config_manager.context() as config:
config.executor_identity = lambda: 'dwight' # bogus thread id
hbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
with mock.patch(hbaseclient_) as hclient:
klass = hclient.HBaseConnectionForCrashReports
def retry_raiser(*args, **kwargs):
raise SomeThriftError('try again')
klass.put_json_dump.side_effect = ValueError('crap!')
crashstorage = HBaseCrashStorage(config)
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
# Note, we're not expect it to raise an error
assert_raises(ValueError,
crashstorage.save_raw_crash,
json.loads(raw),
raw,
"abc123"
)
#eq_(instance.put_json_dump.call_count, 3)
def test_hbase_crashstorage_error_after_retries(self):
cshbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
cchbaseclient_ = \
'socorro.external.hbase.connection_context.hbase_client'
with nested(mock.patch(cshbaseclient_),
mock.patch(cchbaseclient_)) as (cshclient, cchclient):
fake_hbase_client_connection = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_put_json_method = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.put_json_dump = \
fake_put_json_method
cchclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_hbase_client_connection.hbaseThriftExceptions = \
(SomeThriftError,)
fake_put_json_method.side_effect = SomeThriftError('try again')
config = DotDict({
'logger': mock.MagicMock(),
'hbase_timeout': 0,
'hbase_host': 'somehost',
'hbase_port': 9090,
'number_of_retries': 2,
'hbase_connection_pool_class':
HBaseConnectionContextPooled,
'transaction_executor_class':
TransactionExecutorWithLimitedBackoff,
'backoff_delays': [0, 0, 0],
'redactor_class': Redactor,
'forbidden_keys':
Redactor.required_config.forbidden_keys.default,
'executor_identity': lambda: 'dwight' # bogus thread id
})
crashstorage = HBaseCrashStorage(config)
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
assert_raises(SomeThriftError,
crashstorage.save_raw_crash,
json.loads(raw),
raw,
{}
)
eq_(fake_put_json_method.call_count, 3)
def test_hbase_crashstorage_success_after_retries(self):
cshbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
cchbaseclient_ = \
'socorro.external.hbase.connection_context.hbase_client'
with nested(mock.patch(cshbaseclient_),
mock.patch(cchbaseclient_)) as (cshclient, cchclient):
fake_hbase_client_connection = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_put_json_method = mock.MagicMock()
cshclient.HBaseConnectionForCrashReports.put_json_dump = \
fake_put_json_method
cchclient.HBaseConnectionForCrashReports.return_value = \
fake_hbase_client_connection
fake_hbase_client_connection.hbaseThriftExceptions = \
(SomeThriftError,)
_attempts = [SomeThriftError, SomeThriftError]
def retry_raiser_iterator(*args, **kwargs):
try:
raise _attempts.pop(0)
except IndexError:
return None
fake_put_json_method.side_effect = retry_raiser_iterator
config = DotDict({
'logger': mock.MagicMock(),
'hbase_timeout': 0,
'hbase_host': 'somehost',
'hbase_port': 9090,
'number_of_retries': 2,
'hbase_connection_pool_class':
HBaseConnectionContextPooled,
'transaction_executor_class':
TransactionExecutorWithLimitedBackoff,
'backoff_delays': [0, 0, 0],
'redactor_class': Redactor,
'forbidden_keys':
Redactor.required_config.forbidden_keys.default,
'executor_identity': lambda: 'dwight' # bogus thread id
})
crashstorage = HBaseCrashStorage(config)
raw = ('{"name":"Peter", '
'"submitted_timestamp":"%d"}' % time.time())
crashstorage.save_raw_crash(json.loads(raw), raw, "abc123")
eq_(fake_put_json_method.call_count, 3)
def test_hbase_crashstorage_puts_and_gets(self):
mock_logging = mock.Mock()
required_config = HBaseCrashStorage.get_required_config()
required_config.add_option('logger', default=mock_logging)
config_manager = ConfigurationManager(
[required_config],
app_name='testapp',
app_version='1.0',
app_description='app description',
values_source_list=[{
'logger': mock_logging,
'hbase_timeout': 100,
'hbase_host': commonconfig.hbaseHost.default,
'hbase_port': commonconfig.hbasePort.default,
'transaction_executor_class':
TransactionExecutorWithLimitedBackoff,
'backoff_delays': [0, 0, 0],
}],
argv_source=[]
)
with config_manager.context() as config:
config.executor_identity = lambda: 'dwight' # bogus thread id
hbaseclient_ = 'socorro.external.hbase.crashstorage.hbase_client'
with mock.patch(hbaseclient_) as hclient:
# test save_raw_crash
raw_crash = {
"name": "Peter",
"email": "[email protected]",
"url": "http://embarassing.xxx",
"submitted_timestamp": "2012-05-04T15:10:00",
"user_id": "000-00-0000",
}
fake_binary_dump = "this a bogus binary dump"
expected_raw_crash = raw_crash
expected_dump = fake_binary_dump
expected_dump_2 = fake_binary_dump + " number 2"
# saves us from loooong lines
klass = hclient.HBaseConnectionForCrashReports
crashstorage = HBaseCrashStorage(config)
crashstorage.save_raw_crash(raw_crash, fake_binary_dump,
"abc123")
eq_(
klass.put_json_dump.call_count,
1
)
a = klass.put_json_dump.call_args
eq_(len(a[0]), 4)
#eq_(a[0][1], "abc123")
eq_(a[0][2], expected_raw_crash)
eq_(a[0][3], expected_dump)
eq_(a[1], {'number_of_retries': 0})
# test save_processed
processed_crash = {
"name": "Peter",
"uuid": "abc123",
"email": "[email protected]",
"url": "http://embarassing.xxx",
"user_id": "000-00-0000",
}
expected_processed_crash = {
"name": "Peter",
"uuid": "abc123",
}
expected_unredacted_processed_crash = {
"name": "Peter",
"uuid": "abc123",
"email": "[email protected]",
"url": "http://embarassing.xxx",
"user_id": "000-00-0000",
}
crashstorage = HBaseCrashStorage(config)
crashstorage.save_processed(processed_crash)
eq_(klass.put_processed_json.call_count, 1)
a = klass.put_processed_json.call_args
eq_(len(a[0]), 3)
eq_(a[0][1], "abc123")
eq_(a[0][2], expected_unredacted_processed_crash)
eq_(a[1], {'number_of_retries': 0})
# test get_raw_crash
m = mock.Mock(return_value=raw_crash)
klass.get_json = m
r = crashstorage.get_raw_crash("abc123")
ok_(isinstance(r, DotDict))
a = klass.get_json.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_json.call_count, 1)
eq_(r, expected_raw_crash)
# test get_raw_dump
m = mock.Mock(return_value=fake_binary_dump)
klass.get_dump = m
r = crashstorage.get_raw_dump("abc123")
a = klass.get_dump.call_args
eq_(len(a[0]), 3)
eq_(a[0][1], "abc123")
eq_(klass.get_dump.call_count, 1)
eq_(r, expected_dump)
# test get_raw_dumps
m = mock.Mock(return_value={'upload_file_minidump':
fake_binary_dump})
klass.get_dumps = m
r = crashstorage.get_raw_dumps("abc123")
a = klass.get_dumps.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_dumps.call_count, 1)
eq_(r, {'upload_file_minidump': expected_dump})
# test get_raw_dumps 2
m = mock.Mock(return_value={'upload_file_minidump':
fake_binary_dump,
'aux_1':
expected_dump_2})
klass.get_dumps = m
r = crashstorage.get_raw_dumps("abc123")
a = klass.get_dumps.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_dumps.call_count, 1)
eq_(r, {'upload_file_minidump':
fake_binary_dump,
'aux_1':
expected_dump_2})
# test get_processed
m = mock.Mock(return_value=expected_processed_crash)
klass.get_processed_json = m
r = crashstorage.get_processed("abc123")
ok_(isinstance(r, DotDict))
a = klass.get_processed_json.call_args
eq_(len(a[0]), 2)
eq_(a[0][1], "abc123")
eq_(klass.get_processed_json.call_count, 1)
eq_(r, expected_processed_crash)
| twobraids/socorro | socorro/unittest/external/hbase/test_crashstorage.py | Python | mpl-2.0 | 18,322 |
# -*- coding: utf-8 -*-
import scrapy
class QuotesSpider(scrapy.Spider):
name = 'quotes'
allowed_domains = ['quotes.topscrape.com/tag/humor/']
start_urls = ['http://quotes.topscrape.com/tag/humor/']
def parse(self, response):
for quote in response.css('div.quote'):
yield {
'text': quote.css('span.text::text').extract_first(),
'author': quote.xpath('span/small/text()').extract_first(),
}
next_page = response.css('li.next a::attr("href")').extract_first()
if next_page is not None:
yield response.follow(next_page, self.parse)
| MegaWale/python-playb0x | scrapy/scrapyPlay/properties/properties/spiders/quotes.py | Python | mpl-2.0 | 641 |
import unittest
import tempfile
import os
from os.path import join
import zipfile
from git import *
from shutil import rmtree
from gitbranchhealth.branchhealth import BranchHealthConfig
class GitRepoTest(unittest.TestCase):
def setUp(self):
self.__mOriginTempDir = tempfile.mkdtemp(prefix='gitBranchHealthTest')
self.assertTrue(os.path.exists(self.__mOriginTempDir))
# Create our origin first
testRepoZipPath = join(self.__findTestDir(), 'testrepo.zip')
zipFh = open(testRepoZipPath, 'rb')
testRepoZip = zipfile.ZipFile(zipFh)
for name in testRepoZip.namelist():
testRepoZip.extract(name, self.__mOriginTempDir)
zipFh.close()
self.__mOriginGitRepoPath = os.path.join(self.__mOriginTempDir, 'testrepo')
originRepo = Repo(self.__mOriginGitRepoPath)
self.__mTempDir = tempfile.mkdtemp(prefix='gitBranchHealthTest')
os.mkdir(os.path.join(self.__mTempDir, 'testrepo'))
self.assertTrue(os.path.exists(self.__mTempDir))
# Now create the local repo
self.__mGitRepoPath = os.path.join(self.__mTempDir, 'testrepo')
originRepo.clone(self.__mGitRepoPath)
self.assertTrue(os.path.exists(self.__mGitRepoPath))
self.__mConfig = BranchHealthConfig(self.__mGitRepoPath)
self.__trackAllRemoteBranches()
def tearDown(self):
pass
# rmtree(self.__mTempDir)
# rmtree(self.__mOriginTempDir)
def getConfig(self):
return self.__mConfig
def getTempDir(self):
return self.__mTempDir
## Private API ###
def __trackAllRemoteBranches(self):
repo = Repo(self.__mGitRepoPath)
for remote in repo.remotes:
for branch in remote.refs:
localBranchName = branch.name.split('/')[-1]
if localBranchName != 'master' and localBranchName != 'HEAD':
repo.git.checkout(branch.name, b=localBranchName)
repo.heads.master.checkout()
def __findTestDir(self):
# Find the file called 'testrepo.zip', starting at the current dir
for (root, dirs, files) in os.walk('.'):
if 'testrepo.zip' in files:
return root
| jwir3/gitbranchhealth | tests/testutil.py | Python | mpl-2.0 | 2,053 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Module copies data module Variable DIRECTIONS"""
from data import DIRECTIONS
DIRECTIONS = DIRECTIONS[0:3]+('West',)
| ModestoCabrera/is210-week-07-warmup | task_03.py | Python | mpl-2.0 | 167 |
import bot
import config
if __name__ == '__main__':
bot.init(config)
bot.cancel_all()
| linouxis9/bitshares2-liquiditybots | docker-exchangebot/exchangebots/cancel_all.py | Python | mpl-2.0 | 94 |
import os
MOZ_OBJDIR = 'obj-firefox'
config = {
'default_actions': [
'clobber',
'clone-tools',
'checkout-sources',
#'setup-mock',
'build',
#'upload-files',
#'sendchange',
'check-test',
'valgrind-test',
#'generate-build-stats',
#'update',
],
'stage_platform': 'linux64-valgrind',
'publish_nightly_en_US_routes': False,
'build_type': 'valgrind',
'tooltool_manifest_src': "browser/config/tooltool-manifests/linux64/\
releng.manifest",
'platform_supports_post_upload_to_latest': False,
'enable_signing': False,
'enable_talos_sendchange': False,
'perfherder_extra_options': ['valgrind'],
#### 64 bit build specific #####
'env': {
'MOZBUILD_STATE_PATH': os.path.join(os.getcwd(), '.mozbuild'),
'MOZ_AUTOMATION': '1',
'DISPLAY': ':2',
'HG_SHARE_BASE_DIR': '/builds/hg-shared',
'MOZ_OBJDIR': 'obj-firefox',
'TINDERBOX_OUTPUT': '1',
'TOOLTOOL_CACHE': '/builds/tooltool_cache',
'TOOLTOOL_HOME': '/builds',
'MOZ_CRASHREPORTER_NO_REPORT': '1',
'CCACHE_DIR': '/builds/ccache',
'CCACHE_COMPRESS': '1',
'CCACHE_UMASK': '002',
'LC_ALL': 'C',
## 64 bit specific
'PATH': '/tools/buildbot/bin:/usr/local/bin:/usr/lib64/ccache:/bin:\
/usr/bin:/usr/local/sbin:/usr/sbin:/sbin:/tools/git/bin:/tools/python27/bin:\
/tools/python27-mercurial/bin:/home/cltbld/bin',
},
'src_mozconfig': 'browser/config/mozconfigs/linux64/valgrind',
#######################
}
| Yukarumya/Yukarum-Redfoxes | testing/mozharness/configs/builds/releng_sub_linux_configs/64_valgrind.py | Python | mpl-2.0 | 1,603 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
from django.conf import settings
import editorsnotes.main.fields
class Migration(migrations.Migration):
dependencies = [
('main', '0018_auto_20151019_1331'),
]
operations = [
migrations.AlterField(
model_name='document',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='document',
name='creator',
field=models.ForeignKey(related_name='created_document_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='document',
name='last_updated',
field=models.DateTimeField(auto_now=True, verbose_name='The last time this item was edited.'),
),
migrations.AlterField(
model_name='document',
name='last_updater',
field=models.ForeignKey(related_name='last_to_update_document_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The last user to update this item.'),
),
migrations.AlterField(
model_name='featureditem',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='featureditem',
name='creator',
field=models.ForeignKey(related_name='created_featureditem_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='note',
name='assigned_users',
field=models.ManyToManyField(help_text='Users who have been assigned to this note.', to=settings.AUTH_USER_MODEL, blank=True),
),
migrations.AlterField(
model_name='note',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='note',
name='creator',
field=models.ForeignKey(related_name='created_note_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='note',
name='is_private',
field=models.BooleanField(default=False, help_text=b"If true, will only be be viewable to users who belong to the note's project."),
),
migrations.AlterField(
model_name='note',
name='last_updated',
field=models.DateTimeField(auto_now=True, verbose_name='The last time this item was edited.'),
),
migrations.AlterField(
model_name='note',
name='last_updater',
field=models.ForeignKey(related_name='last_to_update_note_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The last user to update this item.'),
),
migrations.AlterField(
model_name='note',
name='license',
field=models.ForeignKey(blank=True, to='licensing.License', help_text='The license under which this note is available.', null=True),
),
migrations.AlterField(
model_name='note',
name='markup',
field=models.TextField(help_text='Text for this item that uses CommonMark syntax, with Working Notes-specific additions for notes, topics, and documents.', null=True, blank=True),
),
migrations.AlterField(
model_name='note',
name='markup_html',
field=editorsnotes.main.fields.XHTMLField(help_text='The markup text for this item rendered into HTML.', null=True, editable=False, blank=True),
),
migrations.AlterField(
model_name='note',
name='project',
field=models.ForeignKey(related_name='notes', to='main.Project', help_text='The project to which this note belongs.'),
),
migrations.AlterField(
model_name='note',
name='status',
field=models.CharField(default='1', help_text='The status of the note. "Open" for outstanding, "Closed" for finished, or "Hibernating" for somewhere in between.', max_length=1, choices=[('0', 'closed'), ('1', 'open'), ('2', 'hibernating')]),
),
migrations.AlterField(
model_name='note',
name='title',
field=models.CharField(help_text='The title of the note.', max_length='80'),
),
migrations.AlterField(
model_name='project',
name='image',
field=models.ImageField(upload_to='project_images', null=True, verbose_name='An image representing this project.', blank=True),
),
migrations.AlterField(
model_name='project',
name='markup',
field=models.TextField(help_text='Text for this item that uses CommonMark syntax, with Working Notes-specific additions for notes, topics, and documents.', null=True, blank=True),
),
migrations.AlterField(
model_name='project',
name='markup_html',
field=editorsnotes.main.fields.XHTMLField(help_text='The markup text for this item rendered into HTML.', null=True, editable=False, blank=True),
),
migrations.AlterField(
model_name='project',
name='name',
field=models.CharField(help_text='The name of the project.', max_length='80'),
),
migrations.AlterField(
model_name='projectinvitation',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='projectinvitation',
name='creator',
field=models.ForeignKey(related_name='created_projectinvitation_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='scan',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='scan',
name='creator',
field=models.ForeignKey(related_name='created_scan_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='topic',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='topic',
name='creator',
field=models.ForeignKey(related_name='created_topic_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='topic',
name='last_updated',
field=models.DateTimeField(auto_now=True, verbose_name='The last time this item was edited.'),
),
migrations.AlterField(
model_name='topic',
name='last_updater',
field=models.ForeignKey(related_name='last_to_update_topic_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The last user to update this item.'),
),
migrations.AlterField(
model_name='topic',
name='markup',
field=models.TextField(help_text='Text for this item that uses CommonMark syntax, with Working Notes-specific additions for notes, topics, and documents.', null=True, blank=True),
),
migrations.AlterField(
model_name='topic',
name='markup_html',
field=editorsnotes.main.fields.XHTMLField(help_text='The markup text for this item rendered into HTML.', null=True, editable=False, blank=True),
),
migrations.AlterField(
model_name='topicassignment',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='topicassignment',
name='creator',
field=models.ForeignKey(related_name='created_topicassignment_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='transcript',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='The time this item was created.'),
),
migrations.AlterField(
model_name='transcript',
name='creator',
field=models.ForeignKey(related_name='created_transcript_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The user who created this item.'),
),
migrations.AlterField(
model_name='transcript',
name='last_updated',
field=models.DateTimeField(auto_now=True, verbose_name='The last time this item was edited.'),
),
migrations.AlterField(
model_name='transcript',
name='last_updater',
field=models.ForeignKey(related_name='last_to_update_transcript_set', editable=False, to=settings.AUTH_USER_MODEL, help_text='The last user to update this item.'),
),
migrations.AlterField(
model_name='transcript',
name='markup',
field=models.TextField(help_text='Text for this item that uses CommonMark syntax, with Working Notes-specific additions for notes, topics, and documents.', null=True, blank=True),
),
migrations.AlterField(
model_name='transcript',
name='markup_html',
field=editorsnotes.main.fields.XHTMLField(help_text='The markup text for this item rendered into HTML.', null=True, editable=False, blank=True),
),
migrations.AlterField(
model_name='user',
name='profile',
field=models.CharField(help_text='Profile text for a user.', max_length=1000, null=True, blank=True),
),
]
| editorsnotes/editorsnotes | editorsnotes/main/migrations/0019_auto_20160229_0921.py | Python | agpl-3.0 | 10,543 |
"""
Login and logout views for the browseable API.
Add these to your root URLconf if you're using the browseable API and
your API requires authentication.
The urls must be namespaced as 'rest_framework', and you should make sure
your authentication settings include `SessionAuthentication`.
urlpatterns = patterns('',
...
url(r'^auth', include('rest_framework.urls', namespace='rest_framework'))
)
"""
from django.conf.urls.defaults import patterns, url
template_name = {'template_name': 'rest_framework/login.html'}
urlpatterns = patterns('django.contrib.auth.views',
url(r'^login/$', 'login', template_name, name='login'),
url(r'^logout/$', 'logout', template_name, name='logout'),
)
| voer-platform/vp.repo | vpr/rest_framework/urls.py | Python | agpl-3.0 | 724 |
__author__ = 'marijn'
from setuptools import setup
setup(
name="goal_notifier",
version="0.0.0",
license="AGPL3",
packages=['goal_notifier'],
requires=[
"google-api-python-client",
"pykka",
"pydub",
"pyopenssl",
],
scripts=["goal_notifier"]
) | marijnvriens/goal_notifier | setup.py | Python | agpl-3.0 | 304 |
from django.contrib.auth.models import User, Group
from django.test import Client
from rest_framework import status
from app.models import Project, Task
from app.models import Setting
from app.models import Theme
from webodm import settings
from .classes import BootTestCase
from django.core.exceptions import ValidationError
class TestApp(BootTestCase):
fixtures = ['test_processingnodes', ]
def setUp(self):
self.credentials = {
'username': 'testuser',
'password': 'test1234',
'email': '[email protected]'}
# Create a test Group
my_group, created = Group.objects.get_or_create(name='test_group')
# Add user to test Group
User.objects.get(pk=1).groups.add(my_group)
def test_user_login(self):
c = Client()
# User points the browser to the landing page
res = c.post('/', follow=True)
# the user is not logged in
self.assertFalse(res.context['user'].is_authenticated)
# and is redirected to the login page
self.assertRedirects(res, '/login/')
# The login page is being rendered by the correct template
self.assertTemplateUsed(res, 'registration/login.html')
# asks the user to login using a set of valid credentials
res = c.post('/login/', data=self.credentials, follow=True)
# The system acknowledges him
self.assertTrue(res.context['user'].is_authenticated)
# and moves him at the dashboard
self.assertTemplateUsed(res, 'app/dashboard.html')
def test_views(self):
c = Client()
# Connecting to dashboard without auth redirects to /
res = c.get('/dashboard/', follow=True)
self.assertFalse(res.context['user'].is_authenticated)
self.assertRedirects(res, '/login/?next=/dashboard/')
res = c.get('/processingnode/1/', follow=True)
self.assertRedirects(res, '/login/?next=/processingnode/1/')
res = c.get('/map/project/1/', follow=True)
self.assertRedirects(res, '/login/?next=/map/project/1/')
res = c.get('/3d/project/1/task/1/', follow=True)
self.assertRedirects(res, '/login/?next=/3d/project/1/task/1/')
# Login
c.post('/login/', data=self.credentials, follow=True)
# We should have a project created from the dashboard
self.assertTrue(Project.objects.count() >= 1)
# Can access API page
res = c.get('/api/')
self.assertTrue(res.status_code == status.HTTP_200_OK)
# We can access a processingnode view that exists
res = c.get('/processingnode/1/')
self.assertTrue(res.status_code == 200)
self.assertTemplateUsed(res, 'app/processing_node.html')
# We can access a processingnode that is offline
# (and there's a warning message when we do that)
res = c.get('/processingnode/2/')
self.assertTrue(res.status_code == 200)
self.assertTemplateUsed(res, 'app/processing_node.html')
message = list(res.context['messages'])[0]
self.assertEqual(message.tags, 'warning')
self.assertTrue("offline" in message.message)
res = c.get('/processingnode/9999/')
self.assertTrue(res.status_code == 404)
res = c.get('/processingnode/abc/')
self.assertTrue(res.status_code == 404)
# /map/ and /3d/ views
user = User.objects.get(username="testuser")
other_user = User.objects.get(username="testuser2")
project = Project.objects.create(owner=user)
task = Task.objects.create(project=project)
other_project = Project.objects.create(owner=other_user)
other_task = Task.objects.create(project=other_project)
# Cannot access a project that we have no access to, or that does not exist
for project_id in [other_project.id, 99999]:
res = c.get('/map/project/{}/'.format(project_id))
self.assertTrue(res.status_code == status.HTTP_404_NOT_FOUND)
# We can access a project that we have access to
res = c.get('/map/project/{}/'.format(project.id))
self.assertTrue(res.status_code == status.HTTP_200_OK)
# 3D views need project and task parameters
res = c.get('/3d/project/{}/'.format(project.id))
self.assertTrue(res.status_code == status.HTTP_404_NOT_FOUND)
# Cannot access a 3d view for a task we have no access to
res = c.get('/3d/project/{}/task/{}/'.format(other_project.id, other_task.id))
self.assertTrue(res.status_code == status.HTTP_404_NOT_FOUND)
# Can access 3d view for task we have access to
res = c.get('/3d/project/{}/task/{}/'.format(project.id, task.id))
self.assertTrue(res.status_code == status.HTTP_200_OK)
# Cannot access public URLs unless a task is shared
def test_public_views(client, expectedStatus):
res = client.get('/public/task/{}/map/'.format(task.id))
self.assertTrue(res.status_code == expectedStatus)
res = client.get('/public/task/{}/3d/'.format(task.id))
self.assertTrue(res.status_code == expectedStatus)
res = client.get('/public/task/{}/iframe/3d/'.format(task.id))
self.assertTrue(res.status_code == expectedStatus)
res = client.get('/public/task/{}/iframe/map/'.format(task.id))
self.assertTrue(res.status_code == expectedStatus)
res = client.get('/public/task/{}/json/'.format(task.id))
self.assertTrue(res.status_code == expectedStatus)
test_public_views(c, status.HTTP_404_NOT_FOUND)
# Share task
task.public = True
task.save()
# Can now access URLs even as anonymous user
ac = Client()
test_public_views(ac, status.HTTP_200_OK)
def test_admin_views(self):
c = Client()
c.login(username='testsuperuser', password='test1234')
settingId = Setting.objects.all()[0].id # During tests, sometimes this is != 1
themeId = Theme.objects.all()[0].id # During tests, sometimes this is != 1
# Can access admin menu items
admin_menu_items = ['/admin/app/setting/{}/change/'.format(settingId),
'/admin/app/theme/{}/change/'.format(themeId),
'/admin/',
'/admin/app/plugin/',
'/admin/auth/user/',
'/admin/auth/group/',
]
for url in admin_menu_items:
res = c.get(url)
self.assertEqual(res.status_code, status.HTTP_200_OK)
# Cannot access dev tools (not in dev mode)
settings.DEV = False
self.assertEqual(c.get('/dev-tools/').status_code, status.HTTP_404_NOT_FOUND)
settings.DEV = True
# Can access in dev mode
self.assertEqual(c.get('/dev-tools/').status_code, status.HTTP_200_OK)
# Cannot access admin views as normal user
c.logout()
c.login(username='testuser', password='test1234')
# Can never access dev tools as user, even in dev mode
self.assertRedirects(c.get('/dev-tools/', follow=True), '/login/?next=/dev-tools/')
settings.DEV = False
for url in admin_menu_items:
res = c.get(url, follow=True)
self.assertRedirects(res, '/admin/login/?next={}'.format(url))
def test_default_group(self):
# It exists
self.assertTrue(Group.objects.filter(name='Default').count() == 1)
# Verify that all new users are assigned to default group
u = User.objects.create_user(username="default_user")
u.refresh_from_db()
self.assertTrue(u.groups.filter(name='Default').count() == 1)
def test_projects(self):
# Get a normal user
user = User.objects.get(username="testuser")
self.assertFalse(user.is_superuser)
# Create a new project
p = Project.objects.create(owner=user, name="test")
# Have the proper permissions been set?
self.assertTrue(user.has_perm("view_project", p))
self.assertTrue(user.has_perm("add_project", p))
self.assertTrue(user.has_perm("change_project", p))
self.assertTrue(user.has_perm("delete_project", p))
# Get a superuser
superUser = User.objects.get(username="testsuperuser")
self.assertTrue(superUser.is_superuser)
# He should also have permissions, although not explicitly set
self.assertTrue(superUser.has_perm("delete_project", p))
# Get another user
anotherUser = User.objects.get(username="testuser2")
self.assertFalse(anotherUser.is_superuser)
# Should not have permission
self.assertFalse(anotherUser.has_perm("delete_project", p))
def test_tasks(self):
# Create a new task
p = Project.objects.create(owner=User.objects.get(username="testuser"), name="test")
task = Task.objects.create(project=p)
# Test options validation
task.options = [{'name': 'test', 'value': 1}]
self.assertTrue(task.save() is None)
task.options = {'test': 1}
self.assertRaises(ValidationError, task.save)
task.options = [{'name': 'test', 'value': 1}, {"invalid": 1}]
self.assertRaises(ValidationError, task.save)
| OpenDroneMap/WebODM | app/tests/test_app.py | Python | agpl-3.0 | 9,412 |
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from jormungandr.interfaces.v1 import Uri
from jormungandr.interfaces.v1 import Coverage
from jormungandr.interfaces.v1 import Journeys
from jormungandr.interfaces.v1 import Schedules
from jormungandr.interfaces.v1 import Places
from jormungandr.interfaces.v1 import Ptobjects
from jormungandr.interfaces.v1 import Coord
from jormungandr.interfaces.v1 import Disruptions
from jormungandr.interfaces.v1 import Calendars
from jormungandr.interfaces.v1 import converters_collection_type
from jormungandr.interfaces.v1 import Status
from werkzeug.routing import BaseConverter, FloatConverter, PathConverter
from jormungandr.modules_loader import AModule
from resources import Index
class RegionConverter(BaseConverter):
""" The region you want to query"""
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
self.type_ = "string"
self.regex = '[^(/;)]+'
class LonConverter(FloatConverter):
""" The longitude of where the coord you want to query"""
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
self.type_ = "float"
self.regex = '-?\\d+(\\.\\d+)?'
class LatConverter(FloatConverter):
""" The latitude of where the coord you want to query"""
def __init__(self, *args, **kwargs):
FloatConverter.__init__(self, *args, **kwargs)
self.type_ = "float"
self.regex = '-?\\d+(\\.\\d+)?'
class UriConverter(PathConverter):
"""First part of the uri"""
def __init__(self, *args, **kwargs):
PathConverter.__init__(self, *args, **kwargs)
self.type_ = "string"
class IdConverter(BaseConverter):
"""Id of the object you want to query"""
def __init__(self, *args, **kwargs):
BaseConverter.__init__(self, *args, **kwargs)
self.type_ = "string"
class V1Routing(AModule):
def __init__(self, api, name):
super(V1Routing, self).__init__(api, name,
description='Current version of navitia API',
status='current',
index_endpoint='index')
def setup(self):
self.api.app.url_map.converters['region'] = RegionConverter
self.api.app.url_map.converters['lon'] = LonConverter
self.api.app.url_map.converters['lat'] = LatConverter
self.api.app.url_map.converters['uri'] = UriConverter
self.api.app.url_map.converters['id'] = IdConverter
self.api.app.url_map.strict_slashes = False
self.module_resources_manager.register_resource(Index.Index())
self.add_resource(Index.Index,
'/',
'',
endpoint='index')
self.module_resources_manager.register_resource(Index.TechnicalStatus())
self.add_resource(Index.TechnicalStatus,
'/status',
endpoint='technical_status')
coverage = '/coverage/'
region = coverage + '<region:region>/'
coord = coverage + '<lon:lon>;<lat:lat>/'
self.add_resource(Coverage.Coverage,
coverage,
region,
coord,
endpoint='coverage')
self.add_resource(Coord.Coord,
'/coord/<lon:lon>;<lat:lat>',
'/coords/<lon:lon>;<lat:lat>',
endpoint='coord')
collecs = converters_collection_type.collections_to_resource_type.keys()
for collection in collecs:
self.add_resource(getattr(Uri, collection)(True),
region + collection,
coord + collection,
region + '<uri:uri>/' + collection,
coord + '<uri:uri>/' + collection,
endpoint=collection + '.collection')
self.add_resource(getattr(Uri, collection)(False),
region + collection + '/<id:id>',
coord + collection + '/<id:id>',
region + '<uri:uri>/' + collection + '/<id:id>',
coord + '<uri:uri>/' + collection + '/<id:id>',
endpoint=collection + '.id')
collecs = ["routes", "lines", "line_groups", "networks", "stop_areas", "stop_points",
"vehicle_journeys"]
for collection in collecs:
self.add_resource(getattr(Uri, collection)(True),
'/' + collection,
endpoint=collection + '.external_codes')
self.add_resource(Places.Places,
region + 'places',
coord + 'places',
'/places',
endpoint='places')
self.add_resource(Ptobjects.Ptobjects,
region + 'pt_objects',
coord + 'pt_objects',
endpoint='pt_objects')
self.add_resource(Places.PlaceUri,
region + 'places/<id:id>',
coord + 'places/<id:id>',
endpoint='place_uri')
self.add_resource(Places.PlacesNearby,
region + 'places_nearby',
coord + 'places_nearby',
region + '<uri:uri>/places_nearby',
coord + '<uri:uri>/places_nearby',
endpoint='places_nearby')
self.add_resource(Journeys.Journeys,
region + '<uri:uri>/journeys',
coord + '<uri:uri>/journeys',
region + 'journeys',
coord + 'journeys',
'/journeys',
endpoint='journeys')
self.add_resource(Schedules.RouteSchedules,
region + '<uri:uri>/route_schedules',
coord + '<uri:uri>/route_schedules',
'/route_schedules',
endpoint='route_schedules')
self.add_resource(Schedules.NextArrivals,
region + '<uri:uri>/arrivals',
coord + '<uri:uri>/arrivals',
region + 'arrivals',
coord + 'arrivals',
endpoint='arrivals')
self.add_resource(Schedules.NextDepartures,
region + '<uri:uri>/departures',
coord + '<uri:uri>/departures',
region + 'departures',
coord + 'departures',
endpoint='departures')
self.add_resource(Schedules.StopSchedules,
region + '<uri:uri>/stop_schedules',
coord + '<uri:uri>/stop_schedules',
'/stop_schedules',
endpoint='stop_schedules')
self.add_resource(Disruptions.TrafficReport,
region + 'traffic_reports',
region + '<uri:uri>/traffic_reports',
endpoint='traffic_reports')
self.add_resource(Status.Status,
region + 'status',
endpoint='status')
self.add_resource(Calendars.Calendars,
region + 'calendars',
region + '<uri:uri>/calendars',
region + "calendars/<id:id>",
endpoint="calendars")
| TeXitoi/navitia | source/jormungandr/jormungandr/modules/v1_routing/v1_routing.py | Python | agpl-3.0 | 9,012 |
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class EbillPaymentContract(models.Model):
_inherit = "ebill.payment.contract"
paynet_account_number = fields.Char(string="Paynet ID", size=20)
is_paynet_contract = fields.Boolean(
compute="_compute_is_paynet_contract", store=False
)
paynet_service_id = fields.Many2one(
comodel_name="paynet.service", string="Paynet Service", ondelete="restrict"
)
payment_type = fields.Selection(
selection=[("qr", "QR"), ("isr", "ISR")],
string="Payment method",
default="qr",
help="Payment type to use for the invoices sent,"
" PDF will be generated and attached accordingly.",
)
@api.depends("transmit_method_id")
def _compute_is_paynet_contract(self):
transmit_method = self.env.ref("ebill_paynet.paynet_transmit_method")
for record in self:
record.is_paynet_contract = record.transmit_method_id == transmit_method
@api.constrains("transmit_method_id", "paynet_account_number")
def _check_paynet_account_number(self):
for contract in self:
if not contract.is_paynet_contract:
continue
if not contract.paynet_account_number:
raise ValidationError(
_("The Paynet ID is required for a Paynet contract.")
)
@api.constrains("transmit_method_id", "paynet_service_id")
def _check_paynet_service_id(self):
for contract in self:
if contract.is_paynet_contract and not contract.paynet_service_id:
raise ValidationError(
_("A Paynet service is required for a Paynet contract.")
)
| OCA/l10n-switzerland | ebill_paynet/models/ebill_payment_contract.py | Python | agpl-3.0 | 1,855 |
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.conf import settings
from shoop.core.pricing import get_pricing_module
from shoop.core.pricing.default_pricing import DefaultPricingModule
from shoop.testing.factories import (
create_product, create_random_person, get_default_customer_group,
get_default_shop
)
from shoop.testing.utils import apply_request_middleware
original_pricing_module = settings.SHOOP_PRICING_MODULE
def setup_module(module):
settings.SHOOP_PRICING_MODULE = "default_pricing"
def teardown_module(module):
settings.SHOOP_PRICING_MODULE = original_pricing_module
def get_shop_with_tax(include_tax):
shop = get_default_shop()
shop.prices_include_tax = include_tax
shop.save()
return shop
def initialize_test(rf, include_tax=False):
shop = get_shop_with_tax(include_tax=include_tax)
group = get_default_customer_group()
customer = create_random_person()
customer.groups.add(group)
customer.save()
request = rf.get("/")
request.shop = shop
apply_request_middleware(request)
request.customer = customer
return request, shop, group
def test_module_is_active(): # this test is because we want to make sure `SimplePricing` is active
module = get_pricing_module()
assert isinstance(module, DefaultPricingModule)
@pytest.mark.django_db
def test_default_price_none_allowed(rf):
request, shop, group = initialize_test(rf, False)
shop = get_default_shop()
product = create_product("test-product", shop=shop, default_price=None)
assert product.get_price(request) == shop.create_price(0)
@pytest.mark.django_db
def test_default_price_zero_allowed(rf):
request, shop, group = initialize_test(rf, False)
shop = get_default_shop()
product = create_product("test-product", shop=shop, default_price=0)
assert product.get_price(request) == shop.create_price(0)
@pytest.mark.django_db
def test_default_price_value_allowed(rf):
request, shop, group = initialize_test(rf, False)
shop = get_default_shop()
product = create_product("test-product", shop=shop, default_price=100)
assert product.get_price(request) == shop.create_price(100)
@pytest.mark.django_db
def test_non_one_quantity(rf):
request, shop, group = initialize_test(rf, False)
shop = get_default_shop()
product = create_product("test-product", shop=shop, default_price=100)
assert product.get_price(request, quantity=5) == shop.create_price(500)
| akx/shoop | shoop_tests/core/test_default_pricing.py | Python | agpl-3.0 | 2,696 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import deform
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.default_behavior import Cancel
from pontus.form import FormView
from pontus.view import BasicView
from pontus.view_operation import MultipleView
from novaideo.content.processes.reports_management.behaviors import Restor
from novaideo.core import SignalableEntity
from novaideo import _
class RestorViewStudyRestor(BasicView):
title = _('Alert for restoring')
name = 'alertforpublication'
template = 'novaideo:views/reports_management/templates/alert_restor.pt'
def update(self):
result = {}
values = {'context': self.context}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
class RestorFormView(FormView):
title = _('Restore')
behaviors = [Restor, Cancel]
formid = 'formrestor'
name = 'formrestor'
def before_update(self):
self.action = self.request.resource_url(
self.context, 'novaideoapi',
query={'op': 'update_action_view',
'node_id': Restor.node_definition.id})
self.schema.widget = deform.widget.FormWidget(
css_class='deform novaideo-ajax-form')
@view_config(
name='restor',
context=SignalableEntity,
renderer='pontus:templates/views_templates/grid.pt',
)
class RestorView(MultipleView):
title = _('Restore')
name = 'restor'
behaviors = [Restor]
viewid = 'restorentity'
template = 'pontus:templates/views_templates/simple_multipleview.pt'
views = (RestorViewStudyRestor, RestorFormView)
validators = [Restor.get_validator()]
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{Restor: RestorView})
| ecreall/nova-ideo | novaideo/views/reports_management/restor.py | Python | agpl-3.0 | 2,013 |
# coding=utf-8
# This file is part of Bika LIMS
#
# Copyright 2011-2016 by it's authors.
# Some rights reserved. See LICENSE.txt, AUTHORS.txt.
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims.browser.bika_listing import BikaListingTable
from bika.lims.browser.worksheet.views.analyses import AnalysesView
class AnalysesTransposedView(AnalysesView):
""" The view for displaying the table of manage_results transposed.
Analysis Requests are displayed in columns and analyses in rows.
Uses most of the logic provided by BikaListingView through
bika.lims.worksheet.views.AnalysesView to generate the items,
but renders its own template, which is highly specific for
display analysis results. Because of this, some generic
BikaListing functionalities, such as sorting, pagination,
contextual menus for columns, etc. will not work in this view.
"""
def contents_table(self, table_only = True):
""" Overrides contents_table method from the parent class
BikaListingView, using the transposed template instead
of the classic template.
"""
table = AnalysesTransposedTable(bika_listing = self, table_only = True)
return table.render(self)
class AnalysesTransposedTable(BikaListingTable):
""" The BikaListingTable that uses a transposed template for
displaying the results.
"""
render = ViewPageTemplateFile("../templates/analyses_transposed.pt")
render_cell = ViewPageTemplateFile("../templates/analyses_transposed_cell.pt")
def __init__(self, bika_listing = None, table_only = False):
BikaListingTable.__init__(self, bika_listing, True)
self.rows_headers = []
self.trans_items = {}
self.positions = []
self._transpose_data()
def _transpose_data(self):
cached = []
index = 0
#ignore = ['Analysis', 'Service', 'Result', 'ResultDM']
include = ['Attachments', 'DetectionLimit', 'DueDate','Pos', 'ResultDM']
for col in self.bika_listing.review_state['columns']:
if col == 'Result':
# Further interims will be inserted in this position
resindex = index
if col not in include:
continue
lcol = self.bika_listing.columns[col]
self.rows_headers.append({'id': col,
'title': lcol['title'],
'type': lcol.get('type',''),
'row_type': 'field',
'hidden': not lcol.get('toggle', True),
'input_class': lcol.get('input_class',''),
'input_width': lcol.get('input_width','')})
cached.append(col)
index += 1
for item in self.items:
if item['Service'] not in cached:
self.rows_headers.insert(resindex,
{'id': item['Service'],
'title': item['title'],
'type': item.get('type',''),
'row_type': 'analysis',
'index': index})
resindex += 1
cached.append(item['Service'])
pos = item['Pos']
if pos in self.trans_items:
self.trans_items[pos][item['Service']] = item
else:
self.trans_items[pos] = {item['Service']: item}
if pos not in self.positions:
self.positions.append(pos)
def rendered_items(self, cat=None, **kwargs):
return ''
def render_row_cell(self, rowheader, position = ''):
self.current_rowhead = rowheader
self.current_position = position
if rowheader['row_type'] == 'field':
# Only the first item for this position contains common
# data for all the analyses with the same position
its = [i for i in self.items if i['Pos'] == position]
self.current_item = its[0] if its else {}
elif position in self.trans_items \
and rowheader['id'] in self.trans_items[position]:
self.current_item = self.trans_items[position][rowheader['id']]
else:
return ''
return self.render_cell()
| rockfruit/bika.lims | bika/lims/browser/worksheet/views/analyses_transposed.py | Python | agpl-3.0 | 4,358 |
# -*- coding: utf-8 -*-
# © 2011 Raphaël Valyi, Renato Lima, Guewen Baconnier, Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import api, models, fields
class ExceptionRule(models.Model):
_inherit = 'exception.rule'
rule_group = fields.Selection(
selection_add=[('sale', 'Sale')],
)
model = fields.Selection(
selection_add=[
('sale.order', 'Sale order'),
('sale.order.line', 'Sale order line'),
])
class SaleOrder(models.Model):
_inherit = ['sale.order', 'base.exception']
_name = 'sale.order'
_order = 'main_exception_id asc, date_order desc, name desc'
rule_group = fields.Selection(
selection_add=[('sale', 'Sale')],
default='sale',
)
@api.model
def test_all_draft_orders(self):
order_set = self.search([('state', '=', 'draft')])
order_set.test_exceptions()
return True
@api.constrains('ignore_exception', 'order_line', 'state')
def sale_check_exception(self):
orders = self.filtered(lambda s: s.state == 'sale')
if orders:
orders._check_exception()
@api.onchange('order_line')
def onchange_ignore_exception(self):
if self.state == 'sale':
self.ignore_exception = False
@api.multi
def action_confirm(self):
if self.detect_exceptions():
return self._popup_exceptions()
else:
return super(SaleOrder, self).action_confirm()
@api.multi
def action_draft(self):
res = super(SaleOrder, self).action_draft()
orders = self.filtered(lambda s: s.ignore_exception)
orders.write({
'ignore_exception': False,
})
return res
def _sale_get_lines(self):
self.ensure_one()
return self.order_line
@api.model
def _get_popup_action(self):
action = self.env.ref('sale_exception.action_sale_exception_confirm')
return action
| kittiu/sale-workflow | sale_exception/models/sale.py | Python | agpl-3.0 | 2,008 |
# -*- coding: utf8 -*-
#
# Copyright (C) 2017 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from . import stock_procurement_split
| ndp-systemes/odoo-addons | stock_procurement_split/__init__.py | Python | agpl-3.0 | 822 |
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from open_municipio.locations.models import Location
class LocationAdmin(admin.ModelAdmin):
list_display = ('name', 'count')
admin.site.register(Location, LocationAdmin)
| openpolis/open_municipio | open_municipio/locations/admin.py | Python | agpl-3.0 | 267 |
import logging
import json
from django.test.client import Client, RequestFactory
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.urlresolvers import reverse
from mock import patch, ANY, Mock
from nose.tools import assert_true, assert_equal # pylint: disable=E0611
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from django_comment_client.base import views
from django_comment_client.tests.unicode import UnicodeTestMixin
from django_comment_common.models import Role, FORUM_ROLE_STUDENT
from django_comment_common.utils import seed_permissions_roles
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
log = logging.getLogger(__name__)
CS_PREFIX = "http://localhost:4567/api/v1"
class MockRequestSetupMixin(object):
def _create_repsonse_mock(self, data):
return Mock(text=json.dumps(data), json=Mock(return_value=data))\
def _set_mock_request_data(self, mock_request, data):
mock_request.return_value = self._create_repsonse_mock(data)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
@patch('lms.lib.comment_client.utils.requests.request')
class ViewsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
# Patching the ENABLE_DISCUSSION_SERVICE value affects the contents of urls.py,
# so we need to call super.setUp() which reloads urls.py (because
# of the UrlResetMixin)
super(ViewsTestCase, self).setUp(create_user=False)
# create a course
self.course = CourseFactory.create(org='MITx', course='999',
display_name='Robot Super Course')
self.course_id = self.course.id
# seed the forums permissions and roles
call_command('seed_permissions_roles', self.course_id.to_deprecated_string())
# Patch the comment client user save method so it does not try
# to create a new cc user when creating a django user
with patch('student.models.cc.User.save'):
uname = 'student'
email = '[email protected]'
password = 'test'
# Create the user and make them active so we can log them in.
self.student = User.objects.create_user(uname, email, password)
self.student.is_active = True
self.student.save()
# Enroll the student in the course
CourseEnrollmentFactory(user=self.student,
course_id=self.course_id)
self.client = Client()
assert_true(self.client.login(username='student', password='test'))
def test_create_thread(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"thread_type": "discussion",
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
thread = {
"thread_type": "discussion",
"body": ["this is a post"],
"anonymous_to_peers": ["false"],
"auto_subscribe": ["false"],
"anonymous": ["false"],
"title": ["Hello"],
}
url = reverse('create_thread', kwargs={'commentable_id': 'i4x-MITx-999-course-Robot_Super_Course',
'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url, data=thread)
assert_true(mock_request.called)
mock_request.assert_called_with(
'post',
'{prefix}/i4x-MITx-999-course-Robot_Super_Course/threads'.format(prefix=CS_PREFIX),
data={
'thread_type': 'discussion',
'body': u'this is a post',
'anonymous_to_peers': False, 'user_id': 1,
'title': u'Hello',
'commentable_id': u'i4x-MITx-999-course-Robot_Super_Course',
'anonymous': False,
'course_id': u'MITx/999/Robot_Super_Course',
},
params={'request_id': ANY},
headers=ANY,
timeout=5
)
assert_equal(response.status_code, 200)
def test_delete_comment(self, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
test_comment_id = "test_comment_id"
request = RequestFactory().post("dummy_url", {"id": test_comment_id})
request.user = self.student
request.view_name = "delete_comment"
response = views.delete_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id=test_comment_id)
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
args = mock_request.call_args[0]
self.assertEqual(args[0], "delete")
self.assertTrue(args[1].endswith("/{}".format(test_comment_id)))
def _setup_mock_request(self, mock_request, include_depth=False):
"""
Ensure that mock_request returns the data necessary to make views
function correctly
"""
mock_request.return_value.status_code = 200
data = {
"user_id": str(self.student.id),
"closed": False,
}
if include_depth:
data["depth"] = 0
self._set_mock_request_data(mock_request, data)
def _test_request_error(self, view_name, view_kwargs, data, mock_request):
"""
Submit a request against the given view with the given data and ensure
that the result is a 400 error and that no data was posted using
mock_request
"""
self._setup_mock_request(mock_request, include_depth=(view_name == "create_sub_comment"))
response = self.client.post(reverse(view_name, kwargs=view_kwargs), data=data)
self.assertEqual(response.status_code, 400)
for call in mock_request.call_args_list:
self.assertEqual(call[0][0].lower(), "get")
def test_create_thread_no_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_create_thread_empty_title(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_create_thread_no_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_create_thread_empty_body(self, mock_request):
self._test_request_error(
"create_thread",
{"commentable_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_update_thread_no_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo"},
mock_request
)
def test_update_thread_empty_title(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": "foo", "title": " "},
mock_request
)
def test_update_thread_no_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"title": "foo"},
mock_request
)
def test_update_thread_empty_body(self, mock_request):
self._test_request_error(
"update_thread",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " ", "title": "foo"},
mock_request
)
def test_create_comment_no_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_comment_empty_body(self, mock_request):
self._test_request_error(
"create_comment",
{"thread_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_create_sub_comment_no_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_create_sub_comment_empty_body(self, mock_request):
self._test_request_error(
"create_sub_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_no_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{},
mock_request
)
def test_update_comment_empty_body(self, mock_request):
self._test_request_error(
"update_comment",
{"comment_id": "dummy", "course_id": self.course_id.to_deprecated_string()},
{"body": " "},
mock_request
)
def test_update_comment_basic(self, mock_request):
self._setup_mock_request(mock_request)
comment_id = "test_comment_id"
updated_body = "updated body"
response = self.client.post(
reverse(
"update_comment",
kwargs={"course_id": self.course_id.to_deprecated_string(), "comment_id": comment_id}
),
data={"body": updated_body}
)
self.assertEqual(response.status_code, 200)
mock_request.assert_called_with(
"put",
"{prefix}/comments/{comment_id}".format(prefix=CS_PREFIX, comment_id=comment_id),
headers=ANY,
params=ANY,
timeout=ANY,
data={"body": updated_body}
)
def test_flag_thread(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1","username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0,
})
url = reverse('flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_thread(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"title": "Hello",
"body": "this is a post",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "thread",
"group_id": None,
"pinned": False,
"endorsed": False,
"unread_comments_count": 0,
"read": False,
"comments_count": 0
})
url = reverse('un_flag_abuse_for_thread', kwargs={'thread_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/threads/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/threads/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'mark_as_read': True, 'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_flag_comment(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [1],
"type": "comment",
"endorsed": False
})
url = reverse('flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_flag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
def test_un_flag_comment(self, mock_request):
mock_request.return_value.status_code = 200
self._set_mock_request_data(mock_request, {
"body": "this is a comment",
"course_id": "MITx/999/Robot_Super_Course",
"anonymous": False,
"anonymous_to_peers": False,
"commentable_id": "i4x-MITx-999-course-Robot_Super_Course",
"created_at": "2013-05-10T18:53:43Z",
"updated_at": "2013-05-10T18:53:43Z",
"at_position_list": [],
"closed": False,
"id": "518d4237b023791dca00000d",
"user_id": "1",
"username": "robot",
"votes": {
"count": 0,
"up_count": 0,
"down_count": 0,
"point": 0
},
"abuse_flaggers": [],
"type": "comment",
"endorsed": False
})
url = reverse('un_flag_abuse_for_comment', kwargs={'comment_id': '518d4237b023791dca00000d', 'course_id': self.course_id.to_deprecated_string()})
response = self.client.post(url)
assert_true(mock_request.called)
call_list = [
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('put', '{prefix}/comments/518d4237b023791dca00000d/abuse_unflag'.format(prefix=CS_PREFIX)),
{
'data': {'user_id': '1'},
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
),
(
('get', '{prefix}/comments/518d4237b023791dca00000d'.format(prefix=CS_PREFIX)),
{
'data': None,
'params': {'request_id': ANY},
'headers': ANY,
'timeout': 5
}
)
]
assert_equal(call_list, mock_request.call_args_list)
assert_equal(response.status_code, 200)
@patch("lms.lib.comment_client.utils.requests.request")
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class ViewPermissionsTestCase(UrlResetMixin, ModuleStoreTestCase, MockRequestSetupMixin):
@patch.dict("django.conf.settings.FEATURES", {"ENABLE_DISCUSSION_SERVICE": True})
def setUp(self):
super(ViewPermissionsTestCase, self).setUp()
self.password = "test password"
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create(password=self.password)
self.moderator = UserFactory.create(password=self.password)
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
CourseEnrollmentFactory(user=self.moderator, course_id=self.course.id)
self.moderator.roles.add(Role.objects.get(name="Moderator", course_id=self.course.id))
def test_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_un_pin_thread_as_student(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_un_pin_thread_as_moderator(self, mock_request):
self._set_mock_request_data(mock_request, {})
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("un_pin_thread", kwargs={"course_id": self.course.id.to_deprecated_string(), "thread_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def _set_mock_request_thread_and_comment(self, mock_request, thread_data, comment_data):
def handle_request(*args, **kwargs):
url = args[1]
if "/threads/" in url:
return self._create_repsonse_mock(thread_data)
elif "/comments/" in url:
return self._create_repsonse_mock(comment_data)
else:
raise ArgumentError("Bad url to mock request")
mock_request.side_effect = handle_request
def test_endorse_response_as_staff(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.moderator.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
def test_endorse_response_as_student(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.moderator.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 401)
def test_endorse_response_as_student_question_author(self, mock_request):
self._set_mock_request_thread_and_comment(
mock_request,
{"type": "thread", "thread_type": "question", "user_id": str(self.student.id)},
{"type": "comment", "thread_id": "dummy"}
)
self.client.login(username=self.student.username, password=self.password)
response = self.client.post(
reverse("endorse_comment", kwargs={"course_id": self.course.id.to_deprecated_string(), "comment_id": "dummy"})
)
self.assertEqual(response.status_code, 200)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {})
request = RequestFactory().post("dummy_url", {"thread_type": "discussion", "body": text, "title": text})
request.user = self.student
request.view_name = "create_thread"
response = views.create_thread(request, course_id=self.course.id.to_deprecated_string(), commentable_id="test_commentable")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UpdateThreadUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text, "title": text})
request.user = self.student
request.view_name = "update_thread"
response = views.update_thread(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
self.assertEqual(mock_request.call_args[1]["data"]["title"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_comment"
response = views.create_comment(request, course_id=self.course.id.to_deprecated_string(), thread_id="dummy_thread_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UpdateCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"user_id": str(self.student.id),
"closed": False,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "update_comment"
response = views.update_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class CreateSubCommentUnicodeTestCase(ModuleStoreTestCase, UnicodeTestMixin, MockRequestSetupMixin):
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
@patch('lms.lib.comment_client.utils.requests.request')
def _test_unicode_data(self, text, mock_request):
self._set_mock_request_data(mock_request, {
"closed": False,
"depth": 1,
})
request = RequestFactory().post("dummy_url", {"body": text})
request.user = self.student
request.view_name = "create_sub_comment"
response = views.create_sub_comment(request, course_id=self.course.id.to_deprecated_string(), comment_id="dummy_comment_id")
self.assertEqual(response.status_code, 200)
self.assertTrue(mock_request.called)
self.assertEqual(mock_request.call_args[1]["data"]["body"], text)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class UsersEndpointTestCase(ModuleStoreTestCase, MockRequestSetupMixin):
def set_post_counts(self, mock_request, threads_count=1, comments_count=1):
"""
sets up a mock response from the comments service for getting post counts for our other_user
"""
self._set_mock_request_data(mock_request, {
"threads_count": threads_count,
"comments_count": comments_count,
})
def setUp(self):
self.course = CourseFactory.create()
seed_permissions_roles(self.course.id)
self.student = UserFactory.create()
self.enrollment = CourseEnrollmentFactory(user=self.student, course_id=self.course.id)
self.other_user = UserFactory.create(username="other")
CourseEnrollmentFactory(user=self.other_user, course_id=self.course.id)
def make_request(self, method='get', course_id=None, **kwargs):
course_id = course_id or self.course.id
request = getattr(RequestFactory(), method)("dummy_url", kwargs)
request.user = self.student
request.view_name = "users"
return views.users(request, course_id=course_id.to_deprecated_string())
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_exact_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)["users"],
[{"id": self.other_user.id, "username": self.other_user.username}]
)
@patch('lms.lib.comment_client.utils.requests.request')
def test_finds_no_match(self, mock_request):
self.set_post_counts(mock_request)
response = self.make_request(username="othor")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
def test_requires_GET(self):
response = self.make_request(method='post', username="other")
self.assertEqual(response.status_code, 405)
def test_requires_username_param(self):
response = self.make_request()
self.assertEqual(response.status_code, 400)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_course_does_not_exist(self):
course_id = SlashSeparatedCourseKey.from_deprecated_string("does/not/exist")
response = self.make_request(course_id=course_id, username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertIn("errors", content)
self.assertNotIn("users", content)
def test_requires_requestor_enrolled_in_course(self):
# unenroll self.student from the course.
self.enrollment.delete()
response = self.make_request(username="other")
self.assertEqual(response.status_code, 404)
content = json.loads(response.content)
self.assertTrue(content.has_key("errors"))
self.assertFalse(content.has_key("users"))
@patch('lms.lib.comment_client.utils.requests.request')
def test_requires_matched_user_has_forum_content(self, mock_request):
self.set_post_counts(mock_request, 0, 0)
response = self.make_request(username="other")
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content)["users"], [])
| sudheerchintala/LearnEraPlatForm | lms/djangoapps/django_comment_client/base/tests.py | Python | agpl-3.0 | 35,501 |
"""
Key-value store that holds XBlock field data read out of Blockstore
"""
from collections import namedtuple
from weakref import WeakKeyDictionary
import logging
from xblock.exceptions import InvalidScopeError, NoSuchDefinition
from xblock.fields import Field, BlockScope, Scope, UserScope, Sentinel
from xblock.field_data import FieldData
from openedx.core.djangoapps.xblock.learning_context.manager import get_learning_context_impl
from openedx.core.djangolib.blockstore_cache import (
get_bundle_version_files_cached,
get_bundle_draft_files_cached,
)
log = logging.getLogger(__name__)
ActiveBlock = namedtuple('ActiveBlock', ['olx_hash', 'changed_fields'])
DELETED = Sentinel('DELETED') # Special value indicating a field was reset to its default value
CHILDREN_INCLUDES = Sentinel('CHILDREN_INCLUDES') # Key for a pseudo-field that stores the XBlock's children info
MAX_DEFINITIONS_LOADED = 100 # How many of the most recently used XBlocks' field data to keep in memory at max.
class BlockInstanceUniqueKey(object):
"""
An empty object used as a unique key for each XBlock instance, see
get_weak_key_for_block() and BlockstoreFieldData._get_active_block(). Every
XBlock instance will get a unique one of these keys, even if they are
otherwise identical. Its purpose is similar to `id(block)`.
"""
def get_weak_key_for_block(block):
"""
Given an XBlock instance, return an object with the same lifetime as the
block, suitable as a key to hold block-specific data in a WeakKeyDictionary.
"""
# We would like to make the XBlock instance 'block' itself the key of
# BlockstoreFieldData.active_blocks, so that we have exactly one entry per
# XBlock instance in memory, and they'll each be automatically freed by the
# WeakKeyDictionary as needed. But because XModules implement
# __eq__() in a way that reads all field values, just attempting to use
# the block as a dict key here will trigger infinite recursion. So
# instead we key the dict on an arbitrary object,
# block key = BlockInstanceUniqueKey() which we create here. That way
# the weak reference will still cause the entry in the WeakKeyDictionary to
# be freed automatically when the block is no longer needed, and we
# still get one entry per XBlock instance.
if not hasattr(block, '_field_data_key_obj'):
block._field_data_key_obj = BlockInstanceUniqueKey() # pylint: disable=protected-access
return block._field_data_key_obj # pylint: disable=protected-access
def get_olx_hash_for_definition_key(def_key):
"""
Given a BundleDefinitionLocator, which identifies a specific version of an
OLX file, return the hash of the OLX file as given by the Blockstore API.
"""
if def_key.bundle_version:
# This is referring to an immutable file (BundleVersions are immutable so this can be aggressively cached)
files_list = get_bundle_version_files_cached(def_key.bundle_uuid, def_key.bundle_version)
else:
# This is referring to a draft OLX file which may be recently updated:
files_list = get_bundle_draft_files_cached(def_key.bundle_uuid, def_key.draft_name)
for entry in files_list:
if entry.path == def_key.olx_path:
return entry.hash_digest
raise NoSuchDefinition("Could not load OLX file for key {}".format(def_key))
class BlockstoreFieldData(FieldData):
"""
An XBlock FieldData implementation that reads XBlock field data directly out
of Blockstore.
It requires that every XBlock have a BundleDefinitionLocator as its
"definition key", since the BundleDefinitionLocator is what specifies the
OLX file path and version to use.
Within Blockstore there is no mechanism for setting different field values
at the usage level compared to the definition level, so we treat
usage-scoped fields identically to definition-scoped fields.
"""
def __init__(self):
"""
Initialize this BlockstoreFieldData instance.
"""
# loaded definitions: a dict where the key is the hash of the XBlock's
# olx file (as stated by the Blockstore API), and the values is the
# dict of field data as loaded from that OLX file. The field data dicts
# in this should be considered immutable, and never modified.
self.loaded_definitions = {}
# Active blocks: this holds the field data *changes* for all the XBlocks
# that are currently in memory being used for something. We only keep a
# weak reference so that the memory will be freed when the XBlock is no
# longer needed (e.g. at the end of a request)
# The key of this dictionary is on ID object owned by the XBlock itself
# (see _get_active_block()) and the value is an ActiveBlock object
# (which holds olx_hash and changed_fields)
self.active_blocks = WeakKeyDictionary()
super(BlockstoreFieldData, self).__init__() # lint-amnesty, pylint: disable=super-with-arguments
def _getfield(self, block, name):
"""
Return the field with the given `name` from `block`.
If the XBlock doesn't have such a field, raises a KeyError.
"""
# First, get the field from the class, if defined
block_field = getattr(block.__class__, name, None)
if block_field is not None and isinstance(block_field, Field):
return block_field
# Not in the class, so name really doesn't name a field
raise KeyError(name)
def _check_field(self, block, name):
"""
Given a block and the name of one of its fields, check that we will be
able to read/write it.
"""
if name == CHILDREN_INCLUDES:
return # This is a pseudo-field used in conjunction with BlockstoreChildrenData
field = self._getfield(block, name)
if field.scope in (Scope.children, Scope.parent): # lint-amnesty, pylint: disable=no-else-raise
# This field data store is focused on definition-level field data, and children/parent is mostly
# relevant at the usage level. Scope.parent doesn't even seem to be used?
raise NotImplementedError("Setting Scope.children/parent is not supported by BlockstoreFieldData.")
else:
if field.scope.user != UserScope.NONE:
raise InvalidScopeError("BlockstoreFieldData only supports UserScope.NONE fields")
if field.scope.block not in (BlockScope.DEFINITION, BlockScope.USAGE):
raise InvalidScopeError(
"BlockstoreFieldData does not support BlockScope.{} fields".format(field.scope.block)
)
# There is also BlockScope.TYPE but we don't need to support that;
# it's mostly relevant as Scope.preferences(UserScope.ONE, BlockScope.TYPE)
# Which would be handled by a user-aware FieldData implementation
def _get_active_block(self, block):
"""
Get the ActiveBlock entry for the specified block, creating it if
necessary.
"""
key = get_weak_key_for_block(block)
if key not in self.active_blocks:
self.active_blocks[key] = ActiveBlock(
olx_hash=get_olx_hash_for_definition_key(block.scope_ids.def_id),
changed_fields={},
)
return self.active_blocks[key]
def get(self, block, name):
"""
Get the given field value from Blockstore
If the XBlock has been making changes to its fields, the value will be
in self._get_active_block(block).changed_fields[name]
Otherwise, the value comes from self.loaded_definitions which is a dict
of OLX file field data, keyed by the hash of the OLX file.
"""
self._check_field(block, name)
entry = self._get_active_block(block)
if name in entry.changed_fields:
value = entry.changed_fields[name]
if value == DELETED:
raise KeyError # KeyError means use the default value, since this field was deliberately set to default
return value
try:
saved_fields = self.loaded_definitions[entry.olx_hash]
except KeyError:
if name == CHILDREN_INCLUDES:
# Special case: parse_xml calls add_node_as_child which calls 'block.children.append()'
# BEFORE parse_xml is done, and .append() needs to read the value of children. So
return [] # start with an empty list, it will get filled in.
# Otherwise, this is an anomalous get() before the XML was fully loaded:
# This could happen if an XBlock's parse_xml() method tried to read a field before setting it,
# if an XBlock read field data in its constructor (forbidden), or if an XBlock was loaded via
# some means other than runtime.get_block(). One way this can happen is if you log/print an XBlock during
# XML parsing, because ScopedStorageMixin.__repr__ will try to print all field values, and any fields which
# aren't mentioned in the XML (which are left at their default) will be "not loaded yet."
log.exception(
"XBlock %s tried to read from field data (%s) that wasn't loaded from Blockstore!",
block.scope_ids.usage_id, name,
)
raise # Just use the default value for now; any exception raised here is caught anyways
return saved_fields[name]
# If 'name' is not found, this will raise KeyError, which means to use the default value
def set(self, block, name, value):
"""
Set the value of the field named `name`
"""
entry = self._get_active_block(block)
entry.changed_fields[name] = value
def delete(self, block, name):
"""
Reset the value of the field named `name` to the default
"""
self.set(block, name, DELETED)
def default(self, block, name):
"""
Get the default value for block's field 'name'.
The XBlock class will provide the default if KeyError is raised; this is
mostly for the purpose of context-specific overrides.
"""
raise KeyError(name)
def cache_fields(self, block):
"""
Cache field data:
This is called by the runtime after a block has parsed its OLX via its
parse_xml() methods and written all of its field values into this field
data store. The values will be stored in
self._get_active_block(block).changed_fields
so we know at this point that that isn't really "changed" field data,
it's the result of parsing the OLX. Save a copy into loaded_definitions.
"""
entry = self._get_active_block(block)
self.loaded_definitions[entry.olx_hash] = entry.changed_fields.copy()
# Reset changed_fields to indicate this block hasn't actually made any field data changes, just loaded from XML:
entry.changed_fields.clear()
if len(self.loaded_definitions) > MAX_DEFINITIONS_LOADED:
self.free_unused_definitions()
def has_changes(self, block):
"""
Does the specified block have any unsaved changes?
"""
entry = self._get_active_block(block)
return bool(entry.changed_fields)
def has_cached_definition(self, definition_key):
"""
Has the specified OLX file been loaded into memory?
"""
olx_hash = get_olx_hash_for_definition_key(definition_key)
return olx_hash in self.loaded_definitions
def free_unused_definitions(self):
"""
Free unused field data cache entries from self.loaded_definitions
as long as they're not in use.
"""
olx_hashes = set(self.loaded_definitions.keys())
olx_hashes_needed = set(entry.olx_hash for entry in self.active_blocks.values())
olx_hashes_safe_to_delete = olx_hashes - olx_hashes_needed
# To avoid doing this too often, randomly cull unused entries until
# we have only half as many as MAX_DEFINITIONS_LOADED in memory, if possible.
while olx_hashes_safe_to_delete and (len(self.loaded_definitions) > MAX_DEFINITIONS_LOADED / 2):
del self.loaded_definitions[olx_hashes_safe_to_delete.pop()]
class BlockstoreChildrenData(FieldData):
"""
An XBlock FieldData implementation that reads 'children' data out of
the definition fields in BlockstoreFieldData.
The children field contains usage keys and so is usage-specific; the
BlockstoreFieldData can only store field data that is not usage-specific. So
we store data about the <xblock-include /> elements that define the children
in BlockstoreFieldData (since that is not usage-specific), and this field
data implementation loads that <xblock-include /> data and transforms it
into the usage keys that comprise the standard .children field.
"""
def __init__(self, blockstore_field_data):
"""
Initialize this BlockstoreChildrenData instance.
"""
# The data store that holds Scope.usage and Scope.definition data:
self.authored_data_store = blockstore_field_data
super(BlockstoreChildrenData, self).__init__() # lint-amnesty, pylint: disable=super-with-arguments
def _check_field(self, block, name): # pylint: disable=unused-argument
"""
Given a block and the name of one of its fields, check that we will be
able to read/write it.
"""
if name != 'children':
raise InvalidScopeError("BlockstoreChildrenData can only read/write from a field named 'children'")
def get(self, block, name):
"""
Get the "children' field value.
We do this by reading the parsed <xblock-include /> values from
the regular authored data store and then transforming them to usage IDs.
"""
self._check_field(block, name)
children_includes = self.get_includes(block)
if not children_includes:
return []
# Now the .children field is required to be a list of usage IDs:
learning_context = get_learning_context_impl(block.scope_ids.usage_id)
child_usages = []
for parsed_include in children_includes:
child_usages.append(
learning_context.usage_for_child_include(
block.scope_ids.usage_id, block.scope_ids.def_id, parsed_include,
)
)
return child_usages
def set(self, block, name, value):
"""
Set the value of the field; requires name='children'
"""
self._check_field(block, name)
children_includes = self.authored_data_store.get(block, CHILDREN_INCLUDES)
if len(value) != len(children_includes):
raise RuntimeError(
"This runtime does not allow changing .children directly - use runtime.add_child_include instead."
)
# This is a no-op; the value of 'children' is derived from CHILDREN_INCLUDES
# so we never write to the children field directly. All we do is make sure it
# looks like it's still in sync with CHILDREN_INCLUDES
def get_includes(self, block):
"""
Get the list of <xblock-include /> elements representing this XBlock's
children.
"""
try:
return self.authored_data_store.get(block, CHILDREN_INCLUDES)
except KeyError:
# KeyError raised by an XBlock field data store means "use the
# default value", and the default value for the children field is an
# empty list.
return []
def append_include(self, block, parsed_include):
"""
Append an <xblock-include /> element to this XBlock's list of children
"""
self.authored_data_store.set(block, CHILDREN_INCLUDES, self.get_includes(block) + [parsed_include])
def delete(self, block, name):
"""
Reset the value of the field named `name` to the default
"""
self._check_field(block, name)
self.authored_data_store.set(block, CHILDREN_INCLUDES, [])
self.set(block, name, [])
| stvstnfrd/edx-platform | openedx/core/djangoapps/xblock/runtime/blockstore_field_data.py | Python | agpl-3.0 | 16,343 |
# Yith Library Server is a password storage server.
# Copyright (C) 2012-2013 Yaco Sistemas
# Copyright (C) 2012-2013 Alejandro Blanco Escudero <[email protected]>
# Copyright (C) 2012-2013 Lorenzo Gil Sanchez <[email protected]>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import logging
from yithlibraryserver.user.analytics import get_google_analytics
from yithlibraryserver.user.gravatar import get_gravatar
from yithlibraryserver.user.idp import add_identity_provider
from yithlibraryserver.user.models import User, ExternalIdentity
from yithlibraryserver.user.security import get_user
logger = logging.getLogger(__name__)
def includeme(config):
config.add_directive('add_identity_provider', add_identity_provider)
config.add_request_method(get_user, 'user', reify=True)
config.add_request_method(get_google_analytics,
'google_analytics', reify=True)
config.add_request_method(get_gravatar, 'gravatar', reify=True)
config.add_route('login', '/login')
config.add_route('register_new_user', '/register')
config.add_route('logout', '/logout')
config.add_route('user_destroy', '/destroy')
config.add_route('user_information', '/profile')
config.add_route('user_preferences', '/preferences')
config.add_route('user_identity_providers', '/identity-providers')
config.add_route('user_send_email_verification_code',
'/send-email-verification-code')
config.add_route('user_verify_email', '/verify-email')
config.add_route('user_google_analytics_preference',
'/google-analytics-preference')
config.add_route('user_view', '/user')
logger.debug('Importing %s model so SQLAlchemy knows about it', User)
logger.debug('Importing %s model so SQLAlchemy knows about it', ExternalIdentity)
| lorenzogil/yith-library-server | yithlibraryserver/user/__init__.py | Python | agpl-3.0 | 2,517 |
#
# XML Export, (C) Agaplan 2011
#
import models
import wizard
| jmesteve/saas3 | openerp/addons/xml_export/__init__.py | Python | agpl-3.0 | 64 |
####################################################################################################
#
# GroupedPurchaseOrder - A Django Application.
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
####################################################################################################
from django.core.urlresolvers import reverse, NoReverseMatch
from django.forms.utils import flatatt
from django.utils.html import escape, format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
####################################################################################################
from .html import join_text, merge_new_words, render_tag
####################################################################################################
#
# Notes:
# - How to concate in {% %} ? #deleteModal{{ supplier.pk }}
# - url 'suppliers.update' supplier.pk
#
####################################################################################################
####################################################################################################
def render_icon(icon, title=''):
"""Render a glyphicon.
"""
#? escape ?
# attrs = {'class': 'glyphicon glyphicon-{}'.format(icon)}
attrs = {'class': 'glyphicon glyphicon-' + icon}
if title:
attrs['title'] = _(title)
return format_html('<span{0}></span>', flatatt(attrs))
####################################################################################################
def render_button(content, icon=None, style='default', size='', href='', title='', button_class='', attrs=None):
"""Render a button with content
"""
# <button type="button" class="btn btn-default">Default</button>
# <button type="button" class="btn btn-primary">Primary</button>
# <button type="button" class="btn btn-success">Success</button>
# <button type="button" class="btn btn-info">Info</button>
# <button type="button" class="btn btn-warning">Warning</button>
# <button type="button" class="btn btn-danger">Danger</button>
# <button type="button" class="btn btn-link">Link</button>
#
# size : btn-lg, btn-sm, btn-xs
# <button type="button" class="btn btn-primary btn-lg">Large button</button>
#
# btn-block
# <button type="button" class="btn btn-primary btn-lg btn-block">Block level button</button>
# <button type="button" class="btn btn-default btn-lg btn-block">Block level button</button>
#
# active
# <button type="button" class="btn btn-primary btn-lg active">Primary button</button>
# <a href="#" class="btn btn-default btn-lg active" role="button">Link</a>
#
# disabled="disabled"
# <button type="button" class="btn btn-lg btn-primary" disabled="disabled">Primary button</button>
# <a href="#" class="btn btn-default btn-lg disabled" role="button">Link</a>
#
# <a class="btn btn-default" href="#" role="button">Link</a>
# <button class="btn btn-default" type="submit">Button</button>
# <input class="btn btn-default" type="button" value="Input">
# <input class="btn btn-default" type="submit" value="Submit">
if attrs is None:
attrs = {}
classes = ['btn']
button_styles = ('default', 'primary', 'success', 'info', 'warning', 'danger', 'link')
if style in button_styles:
classes.append('btn-' + style)
else:
raise ValueError('Parameter style must be {} ("{}" given)',
', '.join(button_styles), style)
# size = text_value(size).lower().strip()
if size:
if size == 'xs':
classes.append('btn-xs')
elif size == 'sm' or size == 'small':
classes.append('btn-sm')
elif size == 'lg' or size == 'large':
classes.append('btn-lg')
else:
raise ValueError('Parameter "size" should be "xs", "sm", "lg" or empty ("{}" given)',
format(size))
attrs['class'] = merge_new_words(button_class, classes)
if href:
try:
# current_app = context['request'].resolver_match.namespace
# viewname=viewname, args=view_args, kwargs=view_kwargs, current_app=current_app
url = reverse(href)
except NoReverseMatch:
url = href
attrs['href'] = url
tag = 'a'
else:
tag = 'button'
if title:
attrs['title'] = escape(_(title))
icon_content = render_icon(icon) if icon else ''
if content:
content = join_text((icon_content, escape(_(content))), separator=' ')
else:
content = icon_content
return render_tag(tag, mark_safe(content), attrs=attrs)
####################################################################################################
def render_icon_button(icon, **kwargs):
return render_button(None, icon=icon, **kwargs)
####################################################################################################
def render_modal_icon_button(icon, *args, **kwargs):
attrs = {'data-toggle':'modal', 'data-target':join_text(args)}
return render_button(None, icon=icon, attrs=attrs, **kwargs)
####################################################################################################
def render_dismiss_button(title, **kwargs):
attrs = {'type':'button', 'data-dismiss':'modal'}
return render_button(title, attrs=attrs, **kwargs)
####################################################################################################
def render_close_button(*args, **kwargs):
# '<button type="button" class="close" data-dismiss="modal">'
# '</button>'
attrs = {'type':'button', 'class':'close', 'data-dismiss':'modal'}
title = escape(_('Close'))
content = ('<span aria-hidden="true">×</span>'
'<span class="sr-only">{0}</span>'.format(title))
return render_tag('button', mark_safe(content), attrs=attrs)
####################################################################################################
#
# End
#
####################################################################################################
| FabriceSalvaire/grouped-purchase-order | GroupedPurchaseOrder/bootstrap/components.py | Python | agpl-3.0 | 6,955 |
# -*- encoding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
{
'name': 'Purchase Price List Item',
'version': '7.0.1.0.0',
'category': 'Purchase',
'sequence': 19,
'summary': 'Purchase Price List Item',
'description': """
Improve purchase price managment
================================
* In Purchase List Item, the price is fixed based on price_surchage if base is 'fixed on UOP'
* If 'fixed on UOP', if product UOP change, the price list price will be change automtically.
* Add field 'Qty on Hand', and 'Stock Values' for product
* Add field 'Qty on Hand', 'Stock Values', UOP in product list view
""",
'author': 'Elico Corp',
'website': 'https://www.elico-corp.com',
'images' : [],
'depends': ['purchase'],
'data': [
'purchase_view.xml',
],
'test': [],
'demo': [],
'installable': True,
'auto_install': False,
'application': False,
} | Elico-Corp/openerp-7.0 | purchase_price_list_item/__openerp__.py | Python | agpl-3.0 | 1,073 |
from flask import Flask, request
from flask_bootstrap import Bootstrap
from flask_babel import Babel
import agherant
from webserver_utils import gevent_run
def create_app(conf):
app = Flask(__name__)
app.config.update(conf)
Bootstrap(app)
babel = Babel(app)
app.register_blueprint(agherant.agherant, url_prefix='/agherant')
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(['en', 'it', 'sq'])
return app
def main(conf={}):
app = create_app(conf)
gevent_run(app)
if __name__ == '__main__':
main()
| insomnia-lab/libreant | webant/agherant_standalone.py | Python | agpl-3.0 | 591 |
import json
from decimal import Decimal
from django import forms
class MoneyField(forms.DecimalField):
def __init__(self, **kwargs):
kwargs["decimal_places"] = 2
for f in ["min_value", "max_value"]:
if f in kwargs:
kwargs[f] = Decimal(kwargs[f]) / 100
super().__init__(**kwargs)
def prepare_value(self, value):
if isinstance(value, int):
return Decimal(value) / 100
return value
def clean(self, value):
value = super().clean(value)
return value and int(value * 100)
class AskAmountField(forms.DecimalField):
def __init__(
self, *, amount_choices=None, show_tax_credit=True, by_month=False, **kwargs
):
self.show_tax_credit = show_tax_credit
self.by_month = by_month
self._amount_choices = amount_choices
super().__init__(**kwargs)
if self.min_value is not None:
self.widget.attrs.setdefault(
"data-min-amount-error", self.error_messages["min_value"]
)
if self.max_value is not None:
self.widget.attrs.setdefault(
"data-max-amount-error", self.error_messages["max_value"]
)
self.widget.attrs.setdefault("data-by-month", self.by_month)
@property
def amount_choices(self):
return self._amount_choices
@amount_choices.setter
def amount_choices(self, amount_choices):
self._amount_choices = amount_choices
if self.widget:
self.widget.attrs["data-amount-choices"] = json.dumps(self._amount_choices)
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if not self.show_tax_credit:
attrs.setdefault("data-hide-tax-credit", "Y")
if self.amount_choices is not None:
attrs.setdefault("data-amount-choices", json.dumps(self.amount_choices))
return attrs
| lafranceinsoumise/api-django | agir/donations/form_fields.py | Python | agpl-3.0 | 1,946 |
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
OpenStack - Tests
"""
# Imports #####################################################################
import requests
from collections import namedtuple
from unittest.mock import Mock, call, patch
from instance import openstack
from instance.tests.base import TestCase
# Tests #######################################################################
class OpenStackTestCase(TestCase):
"""
Test cases for OpenStack helper functions
"""
def setUp(self):
super().setUp()
self.nova = Mock()
def test_create_server(self):
"""
Create a VM via nova
"""
self.nova.flavors.find.return_value = 'test-flavor'
self.nova.images.find.return_value = 'test-image'
openstack.create_server(self.nova, 'test-vm', {"ram": 4096, "disk": 40}, {"name": "Ubuntu 12.04"})
self.assertEqual(self.nova.mock_calls, [
call.flavors.find(disk=40, ram=4096),
call.images.find(name='Ubuntu 12.04'),
call.servers.create('test-vm', 'test-image', 'test-flavor', key_name=None)
])
def test_delete_servers_by_name(self):
"""
Delete all servers with a given name
"""
server_class = namedtuple('server_class', 'name pk')
self.nova.servers.list.return_value = [
server_class(name='server-a', pk=1),
server_class(name='server-a', pk=2),
server_class(name='server-b', pk=3),
]
openstack.delete_servers_by_name(self.nova, 'server-a')
self.assertEqual(self.nova.mock_calls, [
call.servers.list(),
call.servers.delete(server_class(name='server-a', pk=1)),
call.servers.delete(server_class(name='server-a', pk=2)),
])
def test_get_server_public_address_none(self):
"""
No public IP when none has been assigned yet
"""
server_class = namedtuple('Server', 'addresses')
server = server_class(addresses=[])
self.assertEqual(openstack.get_server_public_address(server), None)
@patch('requests.packages.urllib3.util.retry.Retry.sleep')
@patch('http.client.HTTPConnection.getresponse')
@patch('http.client.HTTPConnection.request')
def test_nova_client_connection_error(self, mock_request, mock_getresponse, mock_retry_sleep):
"""
Connection error during a request from the nova client
Ensure requests are retried before giving up, with a backoff sleep between attempts
"""
def getresponse_call(*args, **kwargs):
""" Invoked by the nova client when making a HTTP request (via requests/urllib3) """
raise ConnectionResetError('[Errno 104] Connection reset by peer')
mock_getresponse.side_effect = getresponse_call
nova = openstack.get_nova_client()
with self.assertRaises(requests.exceptions.ConnectionError):
nova.servers.get('test-id')
self.assertEqual(mock_getresponse.call_count, 11)
self.assertEqual(mock_retry_sleep.call_count, 10)
| omarkhan/opencraft | instance/tests/test_openstack.py | Python | agpl-3.0 | 3,877 |
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from UM.Math.Float import Float # For fuzzy comparison of edge cases.
## Represents a line segment in 2D.
#
# The line segment is represented by two endpoints.
class LineSegment(object):
## Creates a new line segment with the specified endpoints.
#
# \param endpoint_a An endpoint of the line segment.
# \param endpoint_b An endpoint of the line segment.
def __init__(self, endpoint_a, endpoint_b):
self._endpoint_a = endpoint_a
self._endpoint_b = endpoint_b
## Gets the second endpoint (B) of the line segment.
#
# \return The second endpoint of the line segment.
def getEnd(self):
return self._endpoint_b
## Gets the first endpoint (A) of the line segment.
#
# \return The first endpoint of the line segment.
def getStart(self):
return self._endpoint_a
## Returns the point of intersection of this line segment with another line
# segment, if any.
#
# \param other The line segment to check intersection with.
# \return The intersection point if they intersect, or None otherwise.
def intersection(self, other):
if not self.intersectsWithLine(other._endpoint_a, other._endpoint_b) or not other.intersectsWithLine(self._endpoint_a, self._endpoint_b): #Line segments don't intersect.
return None
direction_me = self._endpoint_b - self._endpoint_a
direction_other = other._endpoint_b - other._endpoint_a
diff_endpoint_a = self._endpoint_a - other._endpoint_a
perpendicular = direction_me.perpendicular()
denominator = perpendicular.dot(direction_other) #Project onto the perpendicular.
numerator = perpendicular.dot(diff_endpoint_a)
if denominator == 0: #Lines are parallel.
return None
return (numerator / denominator.astype(float)) * direction_other + other._endpoint_a
## Returns whether the line segment intersects the specified (infinite)
# line.
#
# If the line segment touches the line with one or both endpoints, that
# counts as an intersection too.
#
# \param a A point on the line to intersect with.
# \param b A different point on the line to intersect with.
# \return True if the line segment intersects with the line, or False
# otherwise.
def intersectsWithLine(self, a, b):
shifted_b = b - a
#It intersects if either endpoint is on the line, or if one endpoint is on the right but the other is not.
return Float.fuzzyCompare(shifted_b.cross(self._endpoint_a), 0) or Float.fuzzyCompare(shifted_b.cross(self._endpoint_b), 0) or (self._pointIsRight(self._endpoint_a, a, b) != self._pointIsRight(self._endpoint_b, a, b))
## Determines whether point p is to the right of the line through a and b.
#
# \param p The point to determine whether it is to the right of the line.
# \param a A point on the line.
# \param b Another point on the line.
def _pointIsRight(self, p, a, b):
shifted_end = b - a
return shifted_end.cross(p - a) < 0 | onitake/Uranium | UM/Math/LineSegment.py | Python | agpl-3.0 | 3,205 |
import os
#: The title of this site
SITE_TITLE = 'Job Board'
#: Database backend
SQLALCHEMY_DATABASE_URI = 'postgresql:///hasjob_testing'
SERVER_NAME = 'hasjob.travis.local:5000'
#: LastUser server
LASTUSER_SERVER = 'https://hasgeek.com/'
#: LastUser client id
LASTUSER_CLIENT_ID = os.environ.get('LASTUSER_CLIENT_ID', '')
#: LastUser client secret
LASTUSER_CLIENT_SECRET = os.environ.get('LASTUSER_CLIENT_SECRET', '')
STATIC_SUBDOMAIN = 'static'
ASSET_SERVER = 'https://static.hasgeek.co.in/'
ASSET_MANIFEST_PATH = "static/build/manifest.json"
# no trailing slash
ASSET_BASE_PATH = '/static/build'
| hasgeek/hasjob | instance/testing.py | Python | agpl-3.0 | 602 |
# -*- coding: utf-8 -*-
from .pagemodels import *
from .catalogmodels import *
from .utilmodels import *
from .usermodels import *
from .dbconnect import Base, engine
def init_models():
Base.metadata.create_all(engine)
| web-izmerenie/avto-lux161 | avto-lux/app/models/init_models.py | Python | agpl-3.0 | 222 |
# Copyright © 2019 José Alberto Orejuela García (josealberto4444)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import configparser
import datetime
import json
import os.path
import re
import requests
import youtube_dl
def read_config(section, key):
config = configparser.ConfigParser()
config.read('config.cfg')
return config[section][key]
def is_invalid(date):
try:
datetime.datetime.strptime(date, '%Y-%m-%d')
except ValueError:
return "Incorrect date format, should be YYYY-MM-DD."
else:
return False
class Apod:
def __init__(self, *args):
self.api_key = read_config('NASA_API', 'api_key')
if args:
self.date = args[0]
else:
self.date = ''
self.date = self.ask_api()['date']
self.filename = 'data/' + self.date
self.error = False
self.consult()
if not self.error:
self.title = self.api_response['title']
self.media_type = self.api_response['media_type']
if self.media_type == 'image':
self.hdlink = self.api_response['hdurl']
self.link = self.api_response['url']
self.explanation()
def ask_api(self):
baseurl = 'https://api.nasa.gov/planetary/apod'
payload = {'api_key': self.api_key, 'date': self.date}
r = requests.get(baseurl, params=payload)
return r.json()
def consult(self):
if os.path.exists('data/' + self.date + '.json'):
with open(self.filename + '.json', 'rt') as f:
self.api_response = json.load(f)
else:
self.api_response = self.ask_api()
if 'code' in self.api_response:
if self.api_response['code'] == 400:
self.error = self.api_response['msg']
else:
self.error = self.api_response['code'] + ': ' + self.api_response['msg']
else:
with open(self.filename + '.json', 'wt') as f:
json.dump(self.api_response, f)
def get_userpage(self):
shortdate = self.date.replace('-', '')
shortdate = shortdate[2:]
url = 'https://apod.nasa.gov/apod/ap' + shortdate + '.html'
payload = {}
r = requests.get(url, params=payload)
return r.text
def scrap_explanation(self, pagesource):
re_explanation = re.compile("Explanation: </b>(.*?)<p>", flags=re.DOTALL) # Compile regex for extracting explanation.
explanation = re_explanation.search(pagesource).groups()[0] # Extract explanation.
explanation = explanation.replace('/\n', '/') # Fix split URLs along several lines.
explanation = explanation.replace('\n>', '>') # Fix split HTML tags.
explanation = explanation.replace('<a/>', '</a>') # Fix typos (they seem to write the HTML by hand, yes).
explanation = explanation.replace('\n', ' ') # Delete all newlines.
explanation = re.sub('\s+', ' ', explanation).strip() # Substitute repeated spaces and strips the ones at the beginning and the end of the string.
explanation = re.sub(r'<a([^>]*)href=["\'](?!http)([^"\']*)["\']([^>]*)>', r'<a\1href="https://apod.nasa.gov/apod/\2"\3>', explanation) # Change relative paths to absolute.
return explanation
def save_explanation(self, explanation):
with open(self.filename + '.html', 'wt') as f:
f.write(explanation)
def explanation(self):
filename = self.filename + '.html'
if os.path.exists(filename):
with open(filename, 'rt') as f:
self.explanation = f.read()
self.html = True
else:
try:
userpage = self.get_userpage()
explanation = self.scrap_explanation(userpage)
except:
explanation = self.api_response['explanation']
self.html = False
else:
self.save_explanation(explanation)
self.html = True
self.explanation = explanation
# TO-DO: Check if already downloaded first
# def download_media():
# if self.media_type == 'image':
# link = self.api_response['hdurl']
# r = requests.get(link)
# extension = os.path.splitext(link)[1]
# filename = self.filename + extension
# with open(filename, 'wb') as f:
# for chunk in r.iter_content(chunk_size=128):
# f.write(chunk)
# elif self.media_type == 'video':
# filename = self.filename + '.%(ext)s'
# ydl_opts = {'outtmpl': filename, 'quiet': True}
# with youtube_dl.YoutubeDL(ydl_opts) as ydl:
# ydl.download([api_response['url']])
| josealberto4444/ApodNasaBot | api.py | Python | agpl-3.0 | 5,412 |
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
import numpy as np
class TestTriangularBarkBands(TestCase):
def InitTriangularBarkBands(self, nbands):
return TriangularBarkBands(inputSize=1024,
numberBands=nbands,
lowFrequencyBound=0,
highFrequencyBound=44100*.5)
def testRegression(self):
spectrum = [1]*1024
mbands = self.InitTriangularBarkBands(24)(spectrum)
self.assertEqual(len(mbands), 24 )
self.assert_(not any(numpy.isnan(mbands)))
self.assert_(not any(numpy.isinf(mbands)))
self.assertAlmostEqualVector(mbands, [1]*24, 1e-5)
mbands = self.InitTriangularBarkBands(128)(spectrum)
self.assertEqual(len(mbands), 128 )
self.assert_(not any(numpy.isnan(mbands)))
self.assert_(not any(numpy.isinf(mbands)))
self.assertAlmostEqualVector(mbands, [1]*128, 1e-5)
def testRegressionRastaMode(self):
# Test the BFCC extractor compared to Rastamat specifications
audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded/vignesh.wav'),
sampleRate = 44100)()*2**15
#Expected values generated in Rastamat/MATLAB
expected = [ 20.28919141, 23.80362425, 26.69797305, 27.10461133, 26.64508125,
26.7758322, 27.1787682, 27.10699792, 26.29040982, 25.04243486,
24.24791966, 24.17377063, 24.61976518, 25.29554584, 24.87617598,
23.79018513, 23.04026225, 23.20707811, 23.09716777, 23.33050168,
22.8201923, 21.49477903, 21.63639095, 22.12937291, 22.01981441,
21.70728156]
frameSize = 1102
hopSize = 441
fftsize = 2048
paddingSize = fftsize - frameSize
spectrumSize = int(fftsize/2) + 1
w = Windowing(type = 'hann',
size = frameSize,
zeroPadding = paddingSize,
normalized = False,
zeroPhase = False)
spectrum = Spectrum(size = fftsize)
mbands = TriangularBarkBands(inputSize= spectrumSize,
type = 'power',
highFrequencyBound = 8000,
lowFrequencyBound = 0,
numberBands = 26,
weighting = 'linear',
normalize = 'unit_max')
pool = Pool()
for frame in FrameGenerator(audio, frameSize = frameSize, hopSize = hopSize, startFromZero = True, validFrameThresholdRatio = 1):
pool.add('TriangularBarkBands', mbands(spectrum(w(frame))))
np.savetxt("out.csv", np.mean(np.log(pool['TriangularBarkBands']),0), delimiter=',')
self.assertAlmostEqualVector( np.mean(np.log(pool['TriangularBarkBands']),0), expected,1e-2)
def testZero(self):
# Inputting zeros should return zero. Try with different sizes
size = 1024
while (size >= 256 ):
self.assertEqualVector(TriangularBarkBands()(zeros(size)), zeros(24))
size /= 2
def testInvalidInput(self):
# mel bands should fail for a spectrum with less than 2 bins
self.assertComputeFails(TriangularBarkBands(), [])
self.assertComputeFails(TriangularBarkBands(), [0.5])
def testInvalidParam(self):
self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 0 })
self.assertConfigureFails(TriangularBarkBands(), { 'numberBands': 1 })
self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': -100 })
self.assertConfigureFails(TriangularBarkBands(), { 'lowFrequencyBound': 100,
'highFrequencyBound': 50 })
self.assertConfigureFails(TriangularBarkBands(), { 'highFrequencyBound': 30000,
'sampleRate': 22050})
def testWrongInputSize(self):
# This test makes sure that even though the inputSize given at
# configure time does not match the input spectrum, the algorithm does
# not crash and correctly resizes internal structures to avoid errors.
spec = [.1,.4,.5,.2,.1,.01,.04]*100
np.savetxt("out.csv", TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec), delimiter=',')
self.assertAlmostEqualVector(
TriangularBarkBands(inputSize=1024, sampleRate=10, highFrequencyBound=4)(spec),
[0.0460643246769905]*24,
1e-6)
"""
def testNotEnoughSpectrumBins(self):
self.assertConfigureFails(TriangularBarkBands(), {'numberBands': 256,
'inputSize': 1025})
"""
suite = allTests(TestTriangularBarkBands)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| carthach/essentia | test/src/unittests/spectral/test_triangularbarkbands.py | Python | agpl-3.0 | 5,769 |
class Location(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, direction):
return Location(self.x + direction.x, self.y + direction.y)
def __sub__(self, direction):
return Location(self.x - direction.x, self.y - direction.y)
def __repr__(self):
return 'Location({}, {})'.format(self.x, self.y)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash((self.x, self.y))
| Spycho/aimmo | aimmo-game-worker/simulation/location.py | Python | agpl-3.0 | 529 |
Subsets and Splits