repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
apmichaud/vitess-apm | test/queryservice_test.py | 1 | 2644 | #!/usr/bin/env python
import logging
import optparse
import traceback
import unittest
import sys
import utils
import framework
from queryservice_tests import cache_tests
from queryservice_tests import nocache_tests
from queryservice_tests import stream_tests
from queryservice_tests import status_tests
from queryservice_tests import test_env
if __name__ == "__main__":
parser = optparse.OptionParser(usage="usage: %prog [options] [test_names]")
parser.add_option("-m", "--memcache", action="store_true", default=False,
help="starts a memcache d, and tests rowcache")
parser.add_option("-e", "--env", default='vttablet,vtocc',
help="Environment that will be used. Valid options: vttablet, vtocc")
parser.add_option("-q", "--quiet", action="store_const", const=0, dest="verbose", default=1)
parser.add_option("-v", "--verbose", action="store_const", const=2, dest="verbose", default=0)
(options, args) = parser.parse_args()
utils.options = options
logging.getLogger().setLevel(logging.ERROR)
suite = unittest.TestSuite()
if args:
for arg in args:
if hasattr(nocache_tests.TestNocache, arg):
suite.addTest(nocache_tests.TestNocache(arg))
elif hasattr(stream_tests.TestStream, arg):
suite.addTest(stream_tests.TestStream(arg))
elif hasattr(cache_tests.TestCache, arg) and options.memcache:
suite.addTest(cache_tests.TestCache(arg))
elif hasattr(cache_tests.TestWillNotBeCached, arg) and options.memcache:
suite.addTest(cache_tests.TestWillNotBeCached(arg))
else:
raise Exception(arg, "not found in tests")
else:
modules = [nocache_tests, stream_tests, status_tests]
if options.memcache:
modules.append(cache_tests)
for m in modules:
suite.addTests(unittest.TestLoader().loadTestsFromModule(m))
try:
for env_name in options.env.split(','):
try:
if env_name == 'vttablet':
env = test_env.VttabletTestEnv()
elif env_name == 'vtocc':
env = test_env.VtoccTestEnv()
else:
raise Exception("Valid options for -e: vtocc, vttablet")
env.memcache = options.memcache
env.setUp()
print "Starting queryservice_test.py: %s" % env_name
sys.stdout.flush()
framework.TestCase.setenv(env)
result = unittest.TextTestRunner(verbosity=options.verbose).run(suite)
if not result.wasSuccessful():
raise Exception("test failures")
finally:
try:
env.tearDown()
except:
traceback.print_exc()
finally:
utils.remove_tmp_files()
| bsd-3-clause | 2,655,012,504,257,989,000 | 33.789474 | 96 | 0.664145 | false |
pavelponomarev/Elmer_IM2D_cases | IM_one_pole/cage/cage_generator.py | 1 | 9486 | # Elmer circuit equations generator for a cage winding taking into account periodicity
# Author: P. Ponomarev
# July 2016
# changelog:
# version 1.3 (03.2017) by PP:
# - added 'boffset' parameter - offset of the body numbers
# version 1.2 (01.2017) by PP:
# - added 'ns' parameter - number of slices for multi-slice model
from __future__ import print_function
# Settings:
ns = 1
nob = 10 # number of rotor bars simulated
boffset = 1 # number of the first bar body
antiperiodic = 1 # periodic or antiperiodic boundary
cn = 4 # circuit number which describes the rotor bars
ctype = "Stranded" # Coil type Massive/Stranded
OUTFILE = 'cage.definitions'
# Rotor circuit
# Bar 1 to Bar N are FEM components of the modelled domain
# L_N and R_N are bar-to-bar inductance and resistance of the rotor end rings
# For 1-pole model (antiperiodic):
# terminal 1 is connected to 2' and 2 is connected to 1'
# For 2-pole model (periodic):
# terminal 1 is connected to 1' and 2 is connected to 2'
# (i,v)
# 1' (0,1) 2'
# O + _________ - I_bar O
# |________| Bar 1 |____\____|
# + | |_________| / |
# C C +
# C L_1l _____________ C L_1r
# C | U_loop | C
# | (4,5) | | | (2,3)
# < | \|/ <
# < R_1l |___ V < R_1r
# < <
# - | + _________ - |-
# |________| Bar 2 |_________|
# | |_________| |
# C (6,7) C
# C L_2l C L_2r
# C C
# | (10,11) | (8,9)
# < <
# < R_2l < R_2r
# < <
# | |
#
# ...
# _________
# |________| Bar N |_________|
# | |_________| |
# C C
# C L_Nl C L_Nr
# C C
# | |
# < <
# < R_Nl < R_Nr
# < <
# | |
# O O
# 1 2
barstxt = ""
###############################################################################
### Filling components section
###############################################################################
# Coil Type can be Massive or Stranded
# assuming that rotor bar bodies are numbered
# consequently starting from 1 onwards to N, where 1 and N are closest to
# periodic boundaries bars:
for nbar in range(1,nob+1):
s = "Component " + str(nbar) + "\n" + \
" Name = String RB" + str(nbar) + "\n" + \
" Body = Integer " + str(nbar+boffset-1) + "\n" + \
" Coil Type = String "+ ctype + "\n" + \
" Number of Turns = Real 1" + "\n" + \
"End" + "\n\n"
barstxt = barstxt + s
###############################################################################
### Declare variables
###############################################################################
# first, the dimensions of the variable arrays are declared
s = "!----------------------------------------------------------\n" + \
"! Equations for " + str(nob) + " rotor bars\n" + \
"!----------------------------------------------------------\n\n" + \
"$ C." + str(cn) + ".source.1 = 0\n\n" + \
"! init matrices of Ax' + Bx = Source\n" + \
"$ C." + str(cn) + ".variables = " + str(nob*3*2)+ "\n" + \
"$ C." + str(cn) + ".perm = zeros(" + str(nob*3*2)+ ")\n" + \
"$ C." + str(cn) + ".A = zeros(" + str(nob*3*2) + ", " + str(nob*3*2) + ")\n" + \
"$ C." + str(cn) + ".B = zeros(" + str(nob*3*2) + ", " + str(nob*3*2) + ")\n" + \
"$ C." + str(cn) + ".Mre = zeros(" + str(nob*3*2) + ", " + str(nob*3*2) + ")\n" + \
"$ C." + str(cn) + ".Mim = zeros(" + str(nob*3*2) + ", " + str(nob*3*2) + ")\n" + \
"! define circuit variables\n\n"
barstxt = barstxt + s
# then, each variable receives its unique name
# each component and element is described by 2 circuit variables - "u" and "i"
# each bar is associated with 2 sections of the end ring - left (l) and right (r)
# each section is described by one single element of the circuit possessing R and L.
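# For illustration (assuming the default cn = 4), bar 1 (nbar = 0) receives
# the following six names from the loop below:
#   C.4.name.1 = "i_component(1)"    C.4.name.2 = "v_component(1)"
#   C.4.name.3 = "i_r1"              C.4.name.4 = "v_r1"
#   C.4.name.5 = "i_l1"              C.4.name.6 = "v_l1"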
for nbar in range(0,nob):
s = "$ C." + str(cn) + ".name." + str(nbar*6 + 1) + " = \"i_component(" + str(nbar+1) + ")\"\n" + \
"$ C." + str(cn) + ".name." + str(nbar*6 + 2) + " = \"v_component(" + str(nbar+1) + ")\"\n" + \
"$ C." + str(cn) + ".name." + str(nbar*6 + 3) + " = \"i_r" + str(nbar+1) + "\"\n" + \
"$ C." + str(cn) + ".name." + str(nbar*6 + 4) + " = \"v_r" + str(nbar+1) + "\"\n" + \
"$ C." + str(cn) + ".name." + str(nbar*6 + 5) + " = \"i_l" + str(nbar+1) + "\"\n" + \
"$ C." + str(cn) + ".name." + str(nbar*6 + 6) + " = \"v_l" + str(nbar+1) + "\"\n\n\n"
barstxt = barstxt + s
###############################################################################
### Kirchhoff voltage law
###############################################################################
# describes voltages in each loop between two bars. Hence, each circuit segment
# contains 4 components(elements)
# loops directed clockwise
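# Illustrative reading of one KVL row (assuming MATC's 0-based matrix
# indexing, so column j corresponds to variable name.(j+1)): for the loop
# between bar k and bar k+1 the row built below states
#   v_bar_k/ns + v_rk - v_lk - v_bar_(k+1)/ns = 0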
s = "! Kirchoff voltage law\n\n"
barstxt = barstxt + s
for nbar in range(0,nob-1):
s = "!Bar" + str(nbar+1) + "\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+2) + "," + str(nbar*6+1) + ") = 1/" + str(ns) + "\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+2) + "," + str(nbar*6+3) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+2) + "," + str(nbar*6+5) + ") = -1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+2) + "," + str(nbar*6+7) + ") = -1/" + str(ns) + "\n\n"
barstxt = barstxt + s
# last bar includes periodicity definition
s = "!Bar" + str(nob) + "\n" + \
"$ C." + str(cn) + ".B(" + str((nob-1)*6+2) + "," + str((nob-1)*6+1) + ") = 1/" + str(ns) + "\n" + \
"$ C." + str(cn) + ".B(" + str((nob-1)*6+2) + "," + str((nob-1)*6+3) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str((nob-1)*6+2) + "," + str((nob-1)*6+5) + ") = -1\n" + \
"$ C." + str(cn) + ".B(" + str((nob-1)*6+2) + "," + str(1) + ") = " + str(1 if antiperiodic==1 else -1) +"/" + str(ns) + "\n\n\n"
barstxt = barstxt + s
###############################################################################
### Kirchhoff current law
###############################################################################
# each bar is connected to two knots -- left and right
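# Illustrative reading (same 0-based indexing assumption as above): each knot
# row below sums to zero the bar current, the ring-segment current arriving
# from the neighbouring bar and the ring-segment current leaving the knot.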
s = "! Kirchoff current law\n\n"
barstxt = barstxt + s
# bar 1 knots contain periodicity information
s = "!Bar" + str(1) + " right knot\n" + \
"$ C." + str(cn) + ".B(" + str(0+0) + "," + str(0+0) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str(0+0) + "," + str(nob*6-(2 if antiperiodic == 1 else 4)) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str(0+0) + "," + str(0+2) + ") = -1\n" + \
"!Bar" + str(1) + " left knot\n" + \
"$ C." + str(cn) + ".B(" + str(0+4) + "," + str(0+4) + ") = -1\n" + \
"$ C." + str(cn) + ".B(" + str(0+4) + "," + str(nob*6-(4 if antiperiodic == 1 else 2)) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str(0+4) + "," + str(0+0) + ") = -1\n\n"
barstxt = barstxt + s
# other bars are composed similarly
for nbar in range(1,nob):
s = "!Bar" + str(nbar+1) + " right knot\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+0) + "," + str(nbar*6+0) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+0) + "," + str(nbar*6-4) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+0) + "," + str(nbar*6+2) + ") = -1\n" + \
"!Bar" + str(nbar+1) + " left knot\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+4) + "," + str(nbar*6+4) + ") = -1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+4) + "," + str(nbar*6-2) + ") = 1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+4) + "," + str(nbar*6+0) + ") = -1\n\n"
barstxt = barstxt + s
###############################################################################
### Elemental equations
###############################################################################
# these equations describe R and L elements in the circuit
# v = vr+vl
# v -iR - Li' = 0
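# Written out for the right ring segment of bar k (A multiplies x', B
# multiplies x, 0-based indexing assumed), the rows below encode
#   -v_rk + R_er*i_rk + L_er*(d i_rk/dt) = 0
# and the analogous relation for the left segment, i.e. v - iR - Li' = 0.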
s = "! Elemental equations\n\n"
barstxt = barstxt + s
for nbar in range(0,nob):
s = "$ C." + str(cn) + ".B(" + str(nbar*6+3) + "," + str(nbar*6+3) + ") = -1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+3) + "," + str(nbar*6+2) + ") = R_er\n" + \
"$ C." + str(cn) + ".A(" + str(nbar*6+3) + "," + str(nbar*6+2) + ") = L_er\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+5) + "," + str(nbar*6+5) + ") = -1\n" + \
"$ C." + str(cn) + ".B(" + str(nbar*6+5) + "," + str(nbar*6+4) + ") = R_er\n" + \
"$ C." + str(cn) + ".A(" + str(nbar*6+5) + "," + str(nbar*6+4) + ") = L_er\n\n"
barstxt = barstxt + s
with open(OUTFILE, 'w+') as f:
f.write(barstxt)
print('Cage circuit equations for circuit number', cn,
'with', ns, 'slices',
'for', nob, 'bars with',
'antiperiodic' if antiperiodic == 1 else 'periodic',
'boundary conditions are saved to', OUTFILE)
| gpl-3.0 | -1,045,329,439,116,759,700 | 43.12093 | 133 | 0.37666 | false |
oddt/oddt | oddt/docking/AutodockVina.py | 1 | 15674 | import sys
import subprocess
import re
import os
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from distutils.spawn import find_executable
from tempfile import gettempdir
from six import string_types
import oddt
from oddt.utils import (is_openbabel_molecule,
is_molecule,
check_molecule)
from oddt.spatial import rmsd
class autodock_vina(object):
def __init__(self,
protein=None,
auto_ligand=None,
size=(20, 20, 20),
center=(0, 0, 0),
exhaustiveness=8,
num_modes=9,
energy_range=3,
seed=None,
prefix_dir=None,
n_cpu=1,
executable=None,
autocleanup=True,
skip_bad_mols=True):
"""Autodock Vina docking engine, which extends it's capabilities:
automatic box (auto-centering on ligand).
Other software compatible with Vina API can also be used (e.g. QuickVina).
Parameters
----------
protein: oddt.toolkit.Molecule object (default=None)
Protein object to be used while generating descriptors.
auto_ligand: oddt.toolkit.Molecule object or string (default=None)
Ligand use to center the docking box. Either ODDT molecule or
a file (opened based on extension and read to ODDT molecule).
Box is centered on geometric center of molecule.
size: tuple, shape=[3] (default=(20, 20, 20))
Dimensions of docking box (in Angstroms)
center: tuple, shape=[3] (default=(0,0,0))
The center of docking box in cartesian space.
exhaustiveness: int (default=8)
Exhaustiveness parameter of Autodock Vina
num_modes: int (default=9)
Number of conformations generated by Autodock Vina. The maximum
number of docked poses is 9 (due to Autodock Vina limitation).
energy_range: int (default=3)
Energy range cutoff for Autodock Vina
seed: int or None (default=None)
Random seed for Autodock Vina
prefix_dir: string or None (default=None)
Temporary directory for Autodock Vina files.
By default (None) system temporary directory is used,
for reference see `tempfile.gettempdir`.
executable: string or None (default=None)
Autodock Vina executable location in the system.
It's really necessary if autodetection fails.
autocleanup: bool (default=True)
Should the docking engine clean up after execution?
skip_bad_mols: bool (default=True)
Should molecules that crash Autodock Vina be skipped.
"""
self.dir = prefix_dir or gettempdir()
self._tmp_dir = None
# define binding site
self.size = size
self.center = center
        # center automatically on ligand
if auto_ligand:
if isinstance(auto_ligand, string_types):
extension = auto_ligand.split('.')[-1]
auto_ligand = next(oddt.toolkit.readfile(extension, auto_ligand))
self.center = auto_ligand.coords.mean(axis=0).round(3)
# autodetect Vina executable
if not executable:
self.executable = find_executable('vina')
if not self.executable:
raise Exception('Could not find Autodock Vina binary.'
'You have to install it globally or supply binary'
'full directory via `executable` parameter.')
else:
self.executable = executable
# detect version
self.version = (subprocess.check_output([self.executable, '--version'])
.decode('ascii').split(' ')[2])
self.autocleanup = autocleanup
self.cleanup_dirs = set()
# share protein to class
self.protein = None
self.protein_file = None
if protein:
self.set_protein(protein)
self.skip_bad_mols = skip_bad_mols
self.n_cpu = n_cpu
if self.n_cpu > exhaustiveness:
warnings.warn('Exhaustiveness is lower than n_cpus, thus CPU will '
'not be saturated.')
# pregenerate common Vina parameters
self.params = []
self.params += ['--center_x', str(self.center[0]),
'--center_y', str(self.center[1]),
'--center_z', str(self.center[2])]
self.params += ['--size_x', str(self.size[0]),
'--size_y', str(self.size[1]),
'--size_z', str(self.size[2])]
self.params += ['--exhaustiveness', str(exhaustiveness)]
if seed is not None:
self.params += ['--seed', str(seed)]
if num_modes > 9 or num_modes < 1:
raise ValueError('The number of docked poses must be between 1 and 9'
' (due to Autodock Vina limitation).')
self.params += ['--num_modes', str(num_modes)]
self.params += ['--energy_range', str(energy_range)]
@property
def tmp_dir(self):
if not self._tmp_dir:
self._tmp_dir = mkdtemp(dir=self.dir, prefix='autodock_vina_')
self.cleanup_dirs.add(self._tmp_dir)
return self._tmp_dir
@tmp_dir.setter
def tmp_dir(self, value):
self._tmp_dir = value
def set_protein(self, protein):
"""Change protein to dock to.
Parameters
----------
protein: oddt.toolkit.Molecule object
Protein object to be used.
"""
# generate new directory
self._tmp_dir = None
if protein:
if isinstance(protein, string_types):
extension = protein.split('.')[-1]
if extension == 'pdbqt':
self.protein_file = protein
self.protein = next(oddt.toolkit.readfile(extension, protein))
self.protein.protein = True
else:
self.protein = next(oddt.toolkit.readfile(extension, protein))
self.protein.protein = True
else:
self.protein = protein
# skip writing if we have PDBQT protein
if self.protein_file is None:
self.protein_file = write_vina_pdbqt(self.protein, self.tmp_dir,
flexible=False)
def score(self, ligands, protein=None):
"""Automated scoring procedure.
Parameters
----------
ligands: iterable of oddt.toolkit.Molecule objects
Ligands to score
protein: oddt.toolkit.Molecule object or None
Protein object to be used. If None, then the default
one is used, else the protein is new default.
Returns
-------
ligands : array of oddt.toolkit.Molecule objects
Array of ligands (scores are stored in mol.data method)
"""
if protein:
self.set_protein(protein)
if not self.protein_file:
raise IOError("No receptor.")
if is_molecule(ligands):
ligands = [ligands]
ligand_dir = mkdtemp(dir=self.tmp_dir, prefix='ligands_')
output_array = []
for n, ligand in enumerate(ligands):
check_molecule(ligand, force_coords=True)
ligand_file = write_vina_pdbqt(ligand, ligand_dir, name_id=n)
try:
scores = parse_vina_scoring_output(
subprocess.check_output([self.executable, '--score_only',
'--receptor', self.protein_file,
'--ligand', ligand_file] + self.params,
stderr=subprocess.STDOUT))
except subprocess.CalledProcessError as e:
sys.stderr.write(e.output.decode('ascii'))
if self.skip_bad_mols:
continue
else:
raise Exception('Autodock Vina failed. Command: "%s"' %
' '.join(e.cmd))
ligand.data.update(scores)
output_array.append(ligand)
rmtree(ligand_dir)
return output_array
def dock(self, ligands, protein=None):
"""Automated docking procedure.
Parameters
----------
ligands: iterable of oddt.toolkit.Molecule objects
Ligands to dock
protein: oddt.toolkit.Molecule object or None
Protein object to be used. If None, then the default one
is used, else the protein is new default.
Returns
-------
ligands : array of oddt.toolkit.Molecule objects
Array of ligands (scores are stored in mol.data method)
"""
if protein:
self.set_protein(protein)
if not self.protein_file:
raise IOError("No receptor.")
if is_molecule(ligands):
ligands = [ligands]
ligand_dir = mkdtemp(dir=self.tmp_dir, prefix='ligands_')
output_array = []
for n, ligand in enumerate(ligands):
check_molecule(ligand, force_coords=True)
ligand_file = write_vina_pdbqt(ligand, ligand_dir, name_id=n)
ligand_outfile = ligand_file[:-6] + '_out.pdbqt'
try:
scores = parse_vina_docking_output(
subprocess.check_output([self.executable, '--receptor',
self.protein_file,
'--ligand', ligand_file,
'--out', ligand_outfile] +
self.params +
['--cpu', str(self.n_cpu)],
stderr=subprocess.STDOUT))
except subprocess.CalledProcessError as e:
sys.stderr.write(e.output.decode('ascii'))
if self.skip_bad_mols:
continue # TODO: print some warning message
else:
raise Exception('Autodock Vina failed. Command: "%s"' %
' '.join(e.cmd))
# docked conformations may have wrong connectivity - use source ligand
if is_openbabel_molecule(ligand):
# find the order of PDBQT atoms assigned by OpenBabel
with open(ligand_file) as f:
write_order = [int(line[7:12].strip())
for line in f
if line[:4] == 'ATOM']
new_order = sorted(range(len(write_order)),
key=write_order.__getitem__)
new_order = [i + 1 for i in new_order] # OBMol has 1 based idx
assert len(new_order) == len(ligand.atoms)
docked_ligands = oddt.toolkit.readfile('pdbqt', ligand_outfile)
for docked_ligand, score in zip(docked_ligands, scores):
# Renumber atoms to match the input ligand
if is_openbabel_molecule(docked_ligand):
docked_ligand.OBMol.RenumberAtoms(new_order)
# HACK: copy docked coordinates onto source ligand
# We assume that the order of atoms match between ligands
clone = ligand.clone
clone.clone_coords(docked_ligand)
clone.data.update(score)
# Calculate RMSD to the input pose
try:
clone.data['vina_rmsd_input'] = rmsd(ligand, clone)
clone.data['vina_rmsd_input_min'] = rmsd(ligand, clone,
method='min_symmetry')
except Exception:
pass
output_array.append(clone)
rmtree(ligand_dir)
return output_array
def clean(self):
for d in self.cleanup_dirs:
rmtree(d)
def predict_ligand(self, ligand):
"""Local method to score one ligand and update it's scores.
Parameters
----------
ligand: oddt.toolkit.Molecule object
Ligand to be scored
Returns
-------
ligand: oddt.toolkit.Molecule object
Scored ligand with updated scores
"""
return self.score([ligand])[0]
def predict_ligands(self, ligands):
"""Method to score ligands lazily
Parameters
----------
ligands: iterable of oddt.toolkit.Molecule objects
Ligands to be scored
Returns
-------
ligand: iterator of oddt.toolkit.Molecule objects
Scored ligands with updated scores
"""
return self.score(ligands)
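# Illustrative usage sketch (not part of the original module; the file names
# below are assumptions):
#
#   engine = autodock_vina(protein='receptor.pdbqt',
#                          auto_ligand='crystal_ligand.mol2',
#                          exhaustiveness=8, num_modes=9)
#   ligands = list(oddt.toolkit.readfile('sdf', 'ligands.sdf'))
#   poses = engine.dock(ligands)      # docked poses, scores stored in pose.data
#   rescored = engine.score(ligands)  # score ligands in their input poses
#   engine.clean()                    # remove temporary directories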
def write_vina_pdbqt(mol, directory, flexible=True, name_id=None):
"""Write single PDBQT molecule to a given directory. For proteins use
    `flexible=False` to avoid encoding torsions. Additionally a name ID can
be appended to a name to avoid conflicts.
"""
if name_id is None:
name_id = ''
# We expect name such as 0_ZINC123456.pdbqt or simply ZINC123456.pdbqt if no
# name_id is specified. All non alpha-numeric signs are replaced with underscore.
mol_file = ('_'.join(filter(None, [str(name_id),
re.sub('[^A-Za-z0-9]+', '_', mol.title)]
)) + '.pdbqt')
# prepend path to filename
mol_file = os.path.join(directory, mol_file)
if is_openbabel_molecule(mol):
if flexible:
            # auto bonding (b), preserve atom indices (p) and Hs (h)
kwargs = {'opt': {'b': None, 'p': None, 'h': None}}
else:
# for proteins write rigid mol (r) and combine all frags in one (c)
kwargs = {'opt': {'r': None, 'c': None, 'h': None}}
else:
kwargs = {'flexible': flexible}
mol.write('pdbqt', mol_file, overwrite=True, **kwargs)
return mol_file
def parse_vina_scoring_output(output):
"""Function parsing Autodock Vina scoring output to a dictionary
Parameters
----------
output : string
        Autodock Vina standard output (STDOUT).
Returns
-------
out : dict
        dictionary containing scores computed by Autodock Vina
"""
out = {}
r = re.compile(r'^(Affinity:|\s{4})')
for line in output.decode('ascii').split('\n')[13:]: # skip some output
if r.match(line):
m = line.replace(' ', '').split(':')
if m[0] == 'Affinity':
m[1] = m[1].replace('(kcal/mol)', '')
out[str('vina_' + m[0].lower())] = float(m[1])
return out
def parse_vina_docking_output(output):
"""Function parsing Autodock Vina docking output to a dictionary
Parameters
----------
output : string
        Autodock Vina standard output (STDOUT).
Returns
-------
out : dict
        dictionary containing scores computed by Autodock Vina
"""
out = []
r = re.compile(r'^\s+\d\s+')
for line in output.decode('ascii').split('\n')[13:]: # skip some output
if r.match(line):
s = line.split()
out.append({'vina_affinity': s[1],
'vina_rmsd_lb': s[2],
'vina_rmsd_ub': s[3]})
return out
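# Example of the returned structure (affinity/RMSD values are illustrative):
#   [{'vina_affinity': '-7.1', 'vina_rmsd_lb': '0.000', 'vina_rmsd_ub': '0.000'},
#    {'vina_affinity': '-6.9', 'vina_rmsd_lb': '1.972', 'vina_rmsd_ub': '3.415'}]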
| bsd-3-clause | 8,064,540,954,755,132,000 | 36.859903 | 85 | 0.534133 | false |
clarin-eric/Centre-Registry | centre-registry-app/centre_registry/migrations/0001_initial.py | 1 | 13641 | # pylint: disable=invalid-name
import centre_registry.models
from django.db import migrations
from django.db import models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name='Centre',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('name', models.CharField(
max_length=200, unique=True, verbose_name='Name')),
('shorthand', models.CharField(
max_length=20, unique=True,
verbose_name='Shorthand code')),
('organisation_name', models.CharField(
max_length=100, verbose_name='Organisation')),
('institution', models.CharField(
max_length=200, verbose_name='Institution')),
('working_unit', models.CharField(
max_length=200, verbose_name='Working unit')),
('address', models.CharField(
max_length=100, verbose_name='Address')),
('postal_code', models.CharField(
max_length=8, verbose_name='Postal code')),
('city', models.CharField(
max_length=100, verbose_name='City')),
('latitude', models.CharField(
max_length=20,
validators=[centre_registry.models.validate_latitude],
verbose_name='Latitude')),
('longitude', models.CharField(
max_length=20,
validators=[centre_registry.models.validate_longitude],
verbose_name='Longitude')),
('type_status', models.CharField(
max_length=100,
blank=True,
verbose_name="Comments about centre's type")),
('website_url', models.URLField(
max_length=2000, verbose_name='Website URL')),
('description', models.CharField(
max_length=500, blank=True, verbose_name='Description')),
('expertise', models.CharField(
max_length=200, blank=True, verbose_name='Expertise')),
('type_certificate_url', models.URLField(
max_length=2000,
blank=True,
verbose_name='Centre type certificate URL')),
('dsa_url', models.URLField(
max_length=2000,
blank=True,
verbose_name='Data Seal of Approval URL')),
('pid_status', models.CharField(
max_length=200,
blank=True,
verbose_name='Persistent Identifier usage status')),
('long_term_archiving_policy', models.CharField(
max_length=200,
blank=True,
verbose_name='Long Time Archiving Policy')),
('repository_system', models.CharField(
max_length=200,
blank=True,
verbose_name='Repository system')),
('strict_versioning', models.BooleanField(
default=False, verbose_name='Strict versioning?')),
],
options={
'verbose_name_plural': 'centres',
'verbose_name': 'centre',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='CentreType',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('type', models.CharField(
max_length=1,
unique=True,
verbose_name='Certified centre type')),
],
options={
'verbose_name_plural': 'formal centre types',
'verbose_name': 'formal centre type',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='Consortium',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('country_code', models.CharField(
max_length=3, unique=True, verbose_name='Country code')),
('country_name', models.CharField(
max_length=20, unique=True, verbose_name='Country name')),
('is_observer', models.BooleanField(
default=False, verbose_name='Is observer (not member)?')),
('name', models.CharField(
max_length=20, verbose_name='Name')),
('website_url', models.URLField(
max_length=2000, verbose_name='Website URL')),
('alias', models.CharField(
max_length=25, verbose_name='Alias (... .clarin.eu)')),
],
options={
'verbose_name_plural': 'consortia',
'verbose_name': 'consortium',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('email_address', models.EmailField(
max_length=75, verbose_name='E-mail address')),
('name', models.CharField(
max_length=200, unique=True, verbose_name='Name')),
('telephone_number', models.CharField(
max_length=30,
blank=True,
verbose_name='Telephone number (E.123 international '
'notation)')),
('website', models.URLField(
max_length=2000, blank=True, verbose_name='Website')),
],
options={
'verbose_name_plural': 'contacts',
'verbose_name': 'contact',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='FCSEndpoint',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('uri', models.URLField(
max_length=2000, unique=True, verbose_name='Base URI')),
('centre', models.ForeignKey(to='centre_registry.Centre', on_delete=django.db.models.deletion.SET_NULL,
null=True)),
],
options={
'verbose_name_plural': 'FCS endpoints',
'verbose_name': 'FCS endpoint',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='MetadataFormat',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('name', models.CharField(
max_length=30,
unique=True,
verbose_name='Metadata format name')),
],
options={
'verbose_name_plural': 'metadata formats',
'verbose_name': 'metadata format',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='OAIPMHEndpoint',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('web_services_set', models.CharField(
max_length=100,
blank=True,
verbose_name='Web services set')),
('web_services_type', models.CharField(
max_length=10,
blank=True,
verbose_name='Web services type (e.g. SOAP; REST)')),
('uri', models.URLField(
max_length=2000, unique=True, verbose_name='Base URI')),
('centre', models.ForeignKey(
to='centre_registry.Centre',
on_delete=django.db.models.deletion.SET_NULL, null=True)),
('metadata_format', models.ForeignKey(
to='centre_registry.MetadataFormat',
verbose_name='Metadata format',
null=True,
on_delete=django.db.models.deletion.SET_NULL)),
],
options={
'verbose_name_plural': 'OAI-PMH endpoints',
'verbose_name': 'OAI-PMH endpoint',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='SAMLIdentityFederation',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('shorthand', models.CharField(
max_length=20, unique=True,
verbose_name='Shorthand code')),
('information_url', models.URLField(
max_length=1024, verbose_name='Information URL')),
('saml_metadata_url', models.URLField(
max_length=1024, verbose_name='SAML metadata URL')),
],
options={
'verbose_name_plural': 'SAML Identity Federations',
'verbose_name': 'SAML Identity Federation',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='SAMLServiceProvider',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('entity_id', models.URLField(
max_length=1024, unique=True, verbose_name='Entity ID')),
('status_url', models.URLField(
max_length=1024, blank=True, verbose_name='Status URL')),
('centre', models.ForeignKey(
to='centre_registry.Centre',
on_delete=django.db.models.deletion.SET_NULL, null=True)),
],
options={
'verbose_name_plural': 'SAML Service Providers',
'verbose_name': 'SAML Service Provider',
},
bases=(models.Model, ), ),
migrations.CreateModel(
name='URLReference',
fields=[
('id', models.AutoField(
serialize=False,
primary_key=True,
auto_created=True,
verbose_name='ID')),
('description', models.CharField(
max_length=300, verbose_name='Content description')),
('url', models.URLField(
max_length=2000, unique=True, verbose_name='URL')),
('centre', models.ForeignKey(
to='centre_registry.Centre',
on_delete=django.db.models.deletion.CASCADE)),
],
options={
'verbose_name_plural': 'URL references',
'verbose_name': 'URL reference',
},
bases=(models.Model, ), ),
migrations.AddField(
model_name='samlidentityfederation',
name='saml_sps_registered',
field=models.ManyToManyField(
to='centre_registry.SAMLServiceProvider',
blank=True,
verbose_name='SAML SPs Registered'),
preserve_default=True, ),
migrations.AddField(
model_name='centre',
name='administrative_contact',
field=models.ForeignKey(
related_name='administrative_contact',
to='centre_registry.Contact',
on_delete=django.db.models.deletion.PROTECT),
preserve_default=True, ),
migrations.AddField(
model_name='centre',
name='consortium',
field=models.ForeignKey(
to='centre_registry.Consortium',
on_delete=django.db.models.deletion.SET_NULL),
preserve_default=True, ),
migrations.AddField(
model_name='centre',
name='technical_contact',
field=models.ForeignKey(
related_name='technical_contact',
to='centre_registry.Contact',
on_delete=django.db.models.deletion.SET_NULL),
preserve_default=True, ),
migrations.AddField(
model_name='centre',
name='type',
field=models.ManyToManyField(to='centre_registry.CentreType'),
preserve_default=True, ),
]
| gpl-3.0 | -708,127,677,141,282,700 | 41.761755 | 119 | 0.467268 | false |
google/sqlcommenter | python/sqlcommenter-python/tests/sqlalchemy/tests.py | 1 | 4346 | #!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import sqlalchemy
from google.cloud.sqlcommenter.sqlalchemy.executor import BeforeExecuteFactory
from ..compat import mock, skipIfPy2
from ..opencensus_mock import mock_opencensus_tracer
from ..opentelemetry_mock import mock_opentelemetry_context
class MockConnection:
@property
def engine(self):
class Engine:
@property
def driver(self):
return 'driver'
return Engine()
class SQLAlchemyTestCase(TestCase):
def assertSQL(self, expected_sql, **kwargs):
before_cursor_execute = BeforeExecuteFactory(**kwargs)
sql, params = before_cursor_execute(
MockConnection(), None, 'SELECT 1;', ('param,'), None, None,
)
self.assertEqual(sql, expected_sql)
self.assertEqual(params, ('param,'))
class Tests(SQLAlchemyTestCase):
def test_no_args(self):
self.assertSQL('SELECT 1;')
def test_db_driver(self):
self.assertSQL(
"SELECT 1; /*db_driver='driver'*/",
with_db_driver=True,
)
def test_db_framework(self):
self.assertSQL(
"SELECT 1; /*db_framework='sqlalchemy%%3A{}'*/".format(sqlalchemy.__version__),
with_db_framework=True,
)
def test_opencensus(self):
with mock_opencensus_tracer():
self.assertSQL(
"SELECT 1; /*traceparent='00-trace%%20id-span%%20id-00',"
"tracestate='congo%%3Dt61rcWkgMzE%%2Crojo%%3D00f067aa0ba902b7'*/",
with_opencensus=True,
)
@skipIfPy2
def test_opentelemetry(self):
with mock_opentelemetry_context():
self.assertSQL(
"SELECT 1; /*traceparent='00-000000000000000000000000deadbeef-000000000000beef-00',"
"tracestate='some_key%%3Dsome_value'*/",
with_opentelemetry=True,
)
@skipIfPy2
def test_both_opentelemetry_and_opencensus_warn(self):
with mock.patch(
"google.cloud.sqlcommenter.sqlalchemy.executor.logger"
) as logger_mock, mock_opencensus_tracer(), mock_opentelemetry_context():
self.assertSQL(
"SELECT 1; /*traceparent='00-000000000000000000000000deadbeef-000000000000beef-00',"
"tracestate='some_key%%3Dsome_value'*/",
with_opentelemetry=True,
with_opencensus=True,
)
self.assertEqual(len(logger_mock.warning.mock_calls), 1)
class FlaskTests(SQLAlchemyTestCase):
flask_info = {
'framework': 'flask',
'controller': 'c',
'route': '/',
}
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_all_data(self, get_info):
self.assertSQL(
"SELECT 1; /*controller='c',framework='flask',route='/'*/",
)
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_framework_disabled(self, get_info):
self.assertSQL(
"SELECT 1; /*controller='c',route='/'*/",
with_framework=False,
)
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_controller_disabled(self, get_info):
self.assertSQL(
"SELECT 1; /*framework='flask',route='/'*/",
with_controller=False,
)
@mock.patch('google.cloud.sqlcommenter.sqlalchemy.executor.get_flask_info', return_value=flask_info)
def test_route_disabled(self, get_info):
self.assertSQL(
"SELECT 1; /*controller='c',framework='flask'*/",
with_route=False,
)
| apache-2.0 | -5,383,465,248,542,579,000 | 32.953125 | 104 | 0.627474 | false |
olix0r/vtwt | vtwt/util.py | 1 | 1493 | import re
from htmlentitydefs import name2codepoint
from twisted.python.text import greedyWrap
from twisted.web.error import Error as WebError
# From http://wiki.python.org/moin/EscapingHtml
_HTMLENT_CODEPOINT_RE = re.compile('&({0}|#\d+);'.format(
'|'.join(name2codepoint.keys())))
def recodeText(text):
"""Parses things like & and ὔ into real characters."""
def _entToUnichr(match):
ent = match.group(1)
try:
if ent.startswith("#"):
char = unichr(int(ent[1:]))
else:
char = unichr(name2codepoint[ent])
except:
char = match.group(0)
return char
return _HTMLENT_CODEPOINT_RE.sub(_entToUnichr, text)
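# Illustrative examples of the decoding performed above:
#   recodeText(u'Tom &amp; Jerry')  ->  u'Tom & Jerry'
#   recodeText(u'caf&#233;')        ->  u'caf\xe9'  (i.e. u'café')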
_whaleFmt = """\
_{lines}__
|\\/{space} x \\
}} {body} |
|/\\{lines}__-/"""
_whalePaddingLen = 6
def failWhale(error, columns=80):
if isinstance(error, WebError):
emsg = "{0.status} {0.message}".format(error)
else:
emsg = str(error)
width = columns - _whalePaddingLen
lines = []
for line in emsg.splitlines():
lines.extend(greedyWrap(line, width))
lineLength = max(map(len, lines))
msg = "{0}|\n|{0}".format((_whalePaddingLen/2)*" ").join(
map(lambda l: "{0:{1}}".format(l, lineLength),
lines))
return _whaleFmt.format(
space = " "*lineLength,
lines = "_"*lineLength,
length = lineLength,
body = msg)
| bsd-3-clause | -5,111,688,798,574,299,000 | 24.305085 | 68 | 0.561956 | false |
onlynight/wechat-dump | wechat/smiley.py | 1 | 4076 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# File: smiley.py
# Date: Thu Jun 18 00:02:43 2015 +0800
# Author: Yuxin Wu <[email protected]>
import os
import re
import json
import struct
from common.textutil import get_file_b64
STATIC_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'static')
UNICODE_SMILEY_FILE = os.path.join(STATIC_PATH, 'unicode-smiley.json')
TENCENT_SMILEY_FILE = os.path.join(STATIC_PATH, 'tencent-smiley.json')
TENCENT_EXTRASMILEY_FILE = os.path.join(STATIC_PATH, 'tencent-smiley-extra.json')
try:
UNICODE_SMILEY_RE = re.compile(
u'[\U00010000-\U0010ffff]|[\u2600-\u2764]|\u2122|\u00a9|\u00ae|[\ue000-\ue5ff]'
)
except re.error:
# UCS-2 build
UNICODE_SMILEY_RE = re.compile(
u'[\uD800-\uDBFF][\uDC00-\uDFFF]|[\u2600-\u2764]|\u2122|\u00a9|\u00ae|[\ue000-\ue5ff]'
)
HEAD = """.smiley {
padding: 1px;
background-position: -1px -1px;
background-repeat: no-repeat;
width: 20px;
height: 20px;
display: inline-block;
vertical-align: top;
zoom: 1;
}
"""
TEMPLATE = """.smiley{name} {{
background-image: url("data:image/png;base64,{b64}");
}}"""
class SmileyProvider(object):
def __init__(self, html_replace=True):
""" html_replace: replace smileycode by html.
otherwise, replace by plain text
"""
self.html_replace = html_replace
if not html_replace:
raise NotImplementedError()
# [微笑] -> 0
self.tencent_smiley = json.load(open(TENCENT_SMILEY_FILE))
# some extra smiley from javascript on wx.qq.com
extra_smiley = json.load(open(TENCENT_EXTRASMILEY_FILE))
extra_smiley = {u'[' + k + u']': v for k, v in
extra_smiley.iteritems()}
self.tencent_smiley.update(extra_smiley)
# 1f35c -> "\ue340"
#self.unicode_smiley_code = gUnicodeCodeMap
# u'\U0001f35c' -> "e340" # for iphone
# u'\ue415' -> 'e415' # for android
unicode_smiley_dict = json.load(open(UNICODE_SMILEY_FILE))
self.unicode_smiley = {(self.unichar(int(k, 16))): hex(ord(v))[2:] for k, v in
unicode_smiley_dict.iteritems()}
self.unicode_smiley.update({v: hex(ord(v))[2:] for _, v in
unicode_smiley_dict.iteritems()})
self.used_smiley_id = set()
def unichar(self, i):
try:
return unichr(i)
except ValueError:
return struct.pack('i', i).decode('utf-32')
def gen_replace_elem(self, smiley_id):
self.used_smiley_id.add(str(smiley_id))
return '<span class="smiley smiley{}"></span>'.format(smiley_id)
def _replace_unicode(self, msg):
if not UNICODE_SMILEY_RE.findall(msg):
# didn't find the code
return msg
for k, v in self.unicode_smiley.iteritems():
if k in msg:
msg = msg.replace(k, self.gen_replace_elem(v))
return msg
def _replace_tencent(self, msg):
if (not '[' in msg or not ']' in msg) \
and (not '/:' in msg) and (not '/' in msg):
return msg
for k, v in self.tencent_smiley.iteritems():
if k in msg:
msg = msg.replace(k, self.gen_replace_elem(v))
return msg
def replace_smileycode(self, msg):
""" replace the smiley code in msg
            return an HTML string
"""
msg = self._replace_unicode(msg)
msg = self._replace_tencent(msg)
return msg
def gen_used_smiley_css(self):
ret = HEAD
for sid in self.used_smiley_id:
fname = os.path.join(STATIC_PATH, 'smileys', '{}.png'.format(sid))
b64 = get_file_b64(fname)
ret = ret + TEMPLATE.format(name=sid, b64=b64)
return ret
if __name__ == '__main__':
smiley = SmileyProvider()
msg = u"[挥手]哈哈呵呵hihi\U0001f684\u2728\u0001 /::<\ue415"
msg = smiley.replace_smileycode(msg)
#print msg
smiley.gen_used_smiley_css()
| gpl-3.0 | -4,862,418,928,206,776,000 | 31.15873 | 94 | 0.57848 | false |
molpopgen/fwdpy11 | examples/discrete_demography/localadaptation.py | 1 | 7832 | #
# Copyright (C) 2019 Kevin Thornton <[email protected]>
#
# This file is part of fwdpy11.
#
# fwdpy11 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fwdpy11 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fwdpy11. If not, see <http://www.gnu.org/licenses/>.
#
"""
Local adaptation of a quantitative trait to differing optima.
"""
import argparse
import math
import sys
from collections import namedtuple
import numpy as np
import pandas as pd
import fwdpy11
# Simulations with tree sequence recording need
# to know the max position in a genome. Here,
# we use a length of 1.0. Thus, all mutation
# and recombination events will be uniform
# random variables on the continuous interval
# [0, GENOME_LENGTH).
GENOME_LENGTH = 1.0
# When recording quant-genetic statistics during a simulation,
# we will use this type. Named tuples are extremely efficient,
# and they are easily converted into Pandas DataFrame objects,
# which is very convenient for analysis and output.
Datum = namedtuple("Data", ["generation", "deme", "gbar", "vg", "wbar"])
def make_parser():
"""
Create a command-line interface to the script.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
required = parser.add_argument_group("Required arguments")
required.add_argument("--popsize", "-N", type=int, help="Diploid population size")
required.add_argument(
"--mu", "-m", type=float, help="Mutation rate (per gamete, per generation)"
)
required.add_argument(
"--sigma",
"-s",
type=float,
help="Standard deviation of Gaussian" "distribution of mutational effects",
)
optional = parser.add_argument_group("Optional arguments")
optional.add_argument(
"--rho", type=float, default=1000.0, help="Scaled recombination rate, rho=4Nr"
)
optional.add_argument(
"--VS",
type=float,
default=10.0,
help="Inverse strength of stabilizing selection",
)
optional.add_argument(
"--opt", type=float, default=1.0, help="Value of new phenotypic optimum"
)
optional.add_argument(
"--migrates",
type=float,
nargs=2,
default=None,
help="Migration rates from 0 to 1 and 1 to 0, respectively.",
)
optional.add_argument(
"--time",
type=float,
default=0.1,
help="Amount of time to simulate past" "optimum shift, in units of N",
)
optional.add_argument(
"--plotfile", type=str, default=None, help="File name for plot"
)
optional.add_argument("--seed", type=int, default=42, help="Random number seed.")
return parser
def validate_arguments(args):
"""
Validate input arguments.
Note: this is likely incomplete.
"""
if args.popsize is None:
raise ValueError("popsize cannot be None")
    if args.mu is None:
        raise ValueError("mu cannot be None")
    if args.mu < 0:
        raise ValueError("mu must be non-negative")
if args.mu < 0 or math.isfinite(args.mu) is False:
raise ValueError("Mutation rate must be non-negative and finite")
if args.sigma is None:
raise ValueError("sigma cannot be none")
if args.sigma < 0 or math.isfinite(args.sigma) is False:
raise ValueError(
"Std. dev. of distribution of effect sizes"
"must be non-negative and finite"
)
if args.migrates is not None:
for m in args.migrates:
if m < 0 or m > 1:
raise ValueError("migration rates must be 0 <= m <= 1")
def make_migmatrix(migrates):
if migrates is None:
return None
mm = np.zeros(4).reshape(2, 2)
mm[0, 1] = migrates[1]
mm[1, 0] = migrates[0]
rs = np.sum(mm, axis=1)
np.fill_diagonal(mm, 1.0 - rs)
return fwdpy11.MigrationMatrix(mm)
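# Worked example (illustrative): migrates=(0.01, 0.02) produces
#   [[0.98, 0.02],
#    [0.01, 0.99]]
# mm[1, 0] carries the 0 -> 1 rate, mm[0, 1] the 1 -> 0 rate, and the
# diagonal is filled so that every row sums to one.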
class Recorder(object):
"""
fwdpy11 allows you to define objects that record data
from populations during simulation. Such objects must
be callable, and the easiest way to do things is to
create a class with a __call__ function.
"""
def __init__(self, start):
self.data = []
self.start = start
def __call__(self, pop, recorder):
if pop.generation >= self.start:
# Record mean trait value each generation.
md = np.array(pop.diploid_metadata, copy=False)
demes = np.unique(md["deme"])
for d in demes:
w = np.where(md["deme"] == d)[0]
gbar = md["g"][w].mean()
vg = md["g"][w].var()
wbar = md["w"][w].mean()
self.data.append(Datum(pop.generation, d, gbar, vg, wbar))
def plot_output(data, filename):
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9, 3))
gs = gridspec.GridSpec(ncols=3, nrows=1, figure=fig)
ax_gbar = fig.add_subplot(gs[0, 0])
ax_vg = fig.add_subplot(gs[0, 1])
ax_wbar = fig.add_subplot(gs[0, 2])
df = pd.DataFrame(data, columns=Datum._fields)
g = df.groupby(["deme"])
for n, gi in g:
ax_gbar.plot(gi["generation"], gi["gbar"], label="Deme {}".format(n))
ax_vg.plot(gi["generation"], gi["vg"], label="Deme {}".format(n))
ax_wbar.plot(gi["generation"], gi["wbar"], label="Deme {}".format(n))
for ax in [ax_gbar, ax_vg, ax_wbar]:
ax.set_xlabel("Generation")
ax_gbar.set_ylabel(r"$\bar{g}$")
ax_vg.set_ylabel(r"$V(G)$")
ax_wbar.set_ylabel(r"$\bar{w}$")
ax_gbar.legend()
plt.tight_layout()
plt.savefig(filename)
def runsim(args):
"""
Run the simulation.
"""
pop = fwdpy11.DiploidPopulation(2 * args.popsize, GENOME_LENGTH)
np.random.seed(args.seed)
rng = fwdpy11.GSLrng(args.seed)
GSSmo0 = fwdpy11.GSSmo(
[
fwdpy11.Optimum(when=0, optimum=0.0, VS=args.VS),
fwdpy11.Optimum(when=10 * args.popsize, optimum=args.opt, VS=args.VS),
]
)
GSSmo1 = fwdpy11.GSSmo(
[
fwdpy11.Optimum(when=0, optimum=0.0, VS=args.VS),
fwdpy11.Optimum(
when=10 * args.popsize, optimum=-1.0 * args.opt, VS=args.VS
),
]
)
mm = make_migmatrix(args.migrates)
dd = fwdpy11.DiscreteDemography(
mass_migrations=[fwdpy11.move_individuals(0, 0, 1, 0.5)], migmatrix=mm
)
p = {
"nregions": [], # No neutral mutations -- add them later!
"gvalue": [fwdpy11.Additive(2.0, GSSmo0), fwdpy11.Additive(2.0, GSSmo1)],
"sregions": [fwdpy11.GaussianS(0, GENOME_LENGTH, 1, args.sigma)],
"recregions": [fwdpy11.Region(0, GENOME_LENGTH, 1)],
"rates": (0.0, args.mu, args.rho / float(4 * args.popsize)),
# Keep mutations at frequency 1 in the pop if they affect fitness.
"prune_selected": False,
"demography": dd,
"simlen": 10 * args.popsize + int(args.popsize * args.time),
}
params = fwdpy11.ModelParams(**p)
r = Recorder(10 * args.popsize)
fwdpy11.evolvets(rng, pop, params, 100, r, suppress_table_indexing=True)
if args.plotfile is not None:
plot_output(r.data, args.plotfile)
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args(sys.argv[1:])
validate_arguments(args)
runsim(args)
| gpl-3.0 | -7,623,108,832,788,986,000 | 30.967347 | 86 | 0.621425 | false |
kastman/lyman | lyman/tests/test_surface.py | 1 | 1392 | import pytest
from .. import surface
class TestSurfaceMeasure(object):
def test_surface_measure_neighbors(self, meshdata):
sm = surface.SurfaceMeasure(meshdata["verts"], meshdata["faces"])
for v, v_n in sm.neighbors.items():
assert v_n == pytest.approx(meshdata["neighbors"][v])
def test_surface_measure_neighbors_from_file(self, meshdata):
sm = surface.SurfaceMeasure.from_file(meshdata["fname"])
for v, v_n in sm.neighbors.items():
assert v_n == pytest.approx(meshdata["neighbors"][v])
def test_surface_measure_distance(self, meshdata):
sm = surface.SurfaceMeasure(meshdata["verts"], meshdata["faces"])
n = meshdata["neighbors"]
d = {0: 0,
1: n[0][1],
2: n[0][2],
3: n[0][3],
4: n[0][2] + n[2][4]}
assert sm(0) == pytest.approx(d)
def test_surface_measure_distance_maxdistance(self, meshdata):
sm = surface.SurfaceMeasure(meshdata["verts"], meshdata["faces"])
n = meshdata["neighbors"]
d = {0: 0,
1: n[0][1]}
assert sm(0, maxdistance=1.1) == pytest.approx(d)
def test_surface_measure_smoke_distances(self, meshdata):
sm = surface.SurfaceMeasure(meshdata["verts"], meshdata["faces"])
for v in range(sm.n_v):
assert isinstance(sm(v), dict)
| bsd-3-clause | 3,423,571,037,266,909,700 | 27.408163 | 73 | 0.58477 | false |
GbalsaC/bitnamiP | django-wiki/wiki/plugins/links/wiki_plugin.py | 1 | 1271 | # -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django.utils.translation import ugettext_lazy as _
from wiki.conf import settings
from wiki.core.plugins import registry
from wiki.core.plugins.base import BasePlugin
from wiki.plugins.links import views
from wiki.plugins.links.mdx.urlize import makeExtension
from wiki.plugins.links.mdx.djangowikilinks import WikiPathExtension
from django.core.urlresolvers import reverse_lazy
class LinkPlugin(BasePlugin):
slug = 'links'
urlpatterns = patterns('',
url(r'^json/query-urlpath/$', views.QueryUrlPath.as_view(), name='links_query_urlpath'),
)
sidebar = {'headline': _('Links'),
'icon_class': 'icon-bookmark',
'template': 'wiki/plugins/links/sidebar.html',
'form_class': None,
'get_form_kwargs': (lambda a: {})}
wikipath_config = [
('base_url', reverse_lazy('wiki:get', kwargs={'path': ''}) ),
('live_lookups', settings.LINK_LIVE_LOOKUPS ),
('default_level', settings.LINK_DEFAULT_LEVEL ),
]
markdown_extensions = [makeExtension(), WikiPathExtension(wikipath_config)]
def __init__(self):
pass
registry.register(LinkPlugin)
| agpl-3.0 | 1,642,203,236,917,021,200 | 32.447368 | 96 | 0.651456 | false |
OPU-Surveillance-System/monitoring | master/scripts/planner/solvers/test_penalization_plot.py | 1 | 1040 | import matplotlib.pyplot as plt
with open("test_pen", "r") as f:
data = f.read()
data = data.split("\n")[:-1]
data = [data[i].split(" ") for i in range(0, len(data))]
pen = [float(data[i][0]) for i in range(len(data))]
u = [float(data[i][1]) for i in range(len(data))]
d = [float(data[i][2]) for i in range(len(data))]
gain = [((d[i-1] - d[i])) / (u[i] - u[i - 1]) for i in range(1, len(data))]
gain = [gain[0]] + gain
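# gain[i] is a finite-difference trade-off slope: the decrease in distance
# divided by the increase in uncertainty between consecutive penalization
# coefficients.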
print(u, d, gain)
fig, ax1 = plt.subplots()
pu, = ax1.plot(pen, u, color="r", label="Uncertainty rate")
ax1.scatter(pen, u, color="k")
#ax1.axhline(9000, color="r", linestyle="--")
#ax1.set_title("Cost evolution according to the number of iterations")
ax1.set_xlabel("Penalization coefficient")
ax1.set_ylabel("Uncertainty rate")
ax2 = ax1.twinx()
pd, = ax2.plot(pen, d, color="b", linestyle="--", label="Distance")
ax2.scatter(pen, d, color="k")
ax2.set_ylabel("Distance")
#ax2.axhline(0.99, color="b", linestyle="--")
#plt.axvline(4000000, color="k",linestyle = ":")
plt.legend(handles=[pu, pd], loc=7)
plt.show()
| mit | 1,042,806,748,452,897,400 | 37.518519 | 75 | 0.632692 | false |
Silvian/samaritan | emailservice/views.py | 1 | 2372 | """
@author: Silvian Dragan
@Date: 17/06/2016
@Copyright: Copyright 2016, Samaritan CMA - Published under GNU General Public Licence v3
@Details: https://github.com/Silvian/samaritan
"""
import json
from django.contrib.auth import get_user
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from api.views import success_response, failure_response
from emailservice.forms import EmailOutboxForm
from samaritan.models import Member, ChurchGroup
from django.shortcuts import get_object_or_404
from emailservice.tasks import send_email_task
def send_emails(request, members):
user = get_user(request)
form = EmailOutboxForm(request.POST or None)
if form.is_valid():
outbox = form.save()
outbox.created_by = user
outbox.save()
        attachment = request.FILES.get('attachment', None)
if attachment:
outbox.attachment = attachment
outbox.save()
for member in members:
if member.email:
send_email_task.delay(
outbox_id=outbox.id, member_id=member.id
)
return HttpResponse(json.dumps(success_response), content_type='application/json')
return HttpResponse(json.dumps(failure_response), content_type='application/json')
@login_required
def send_members_mail(request):
if request.method == 'POST':
members = Member.objects.filter(
is_active=True, is_member=True
).order_by('last_name')
return send_emails(request, members)
@login_required
def send_guests_mail(request):
if request.method == 'POST':
members = Member.objects.filter(
is_active=True, is_member=False
).order_by('last_name')
return send_emails(request, members)
@login_required
def send_everyone_mail(request):
if request.method == 'POST':
members = Member.objects.filter(
is_active=True
).order_by('last_name')
return send_emails(request, members)
@login_required
def send_group_mail(request):
if request.method == 'POST':
church_group = get_object_or_404(ChurchGroup, id=request.POST['id'])
group_members = church_group.members.filter(
is_active=True
).order_by('last_name')
return send_emails(request, group_members)
| gpl-3.0 | -769,699,822,619,569,500 | 28.283951 | 90 | 0.664418 | false |
kyubifire/softlayer-python | SoftLayer/fixtures/SoftLayer_Virtual_Guest.py | 1 | 19232 | getObject = {
'id': 100,
'hostname': 'vs-test1',
'domain': 'test.sftlyr.ws',
'fullyQualifiedDomainName': 'vs-test1.test.sftlyr.ws',
'status': {'keyName': 'ACTIVE', 'name': 'Active'},
'billingItem': {
'id': 6327,
'nextInvoiceTotalRecurringAmount': 1.54,
'children': [
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
{'nextInvoiceTotalRecurringAmount': 1},
],
'package': {
"id": 835,
"keyName": "PUBLIC_CLOUD_SERVER"
},
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
},
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'powerState': {'keyName': 'RUNNING', 'name': 'Running'},
'maxCpu': 2,
'maxMemory': 1024,
'primaryIpAddress': '172.16.240.2',
'globalIdentifier': '1a2b3c-1701',
'primaryBackendIpAddress': '10.45.19.37',
'primaryNetworkComponent': {'speed': 10, 'maxSpeed': 100},
'hourlyBillingFlag': False,
'createDate': '2013-08-01 15:23:45',
'blockDevices': [{'device': 0, 'mountType': 'Disk', 'uuid': 1},
{'device': 1, 'mountType': 'Disk',
'diskImage': {'type': {'keyName': 'SWAP'}}},
{'device': 2, 'mountType': 'CD'},
{'device': 3, 'mountType': 'Disk', 'uuid': 3},
{'device': 4, 'mountType': 'Disk', 'uuid': 4,
'diskImage': {'metadataFlag': True}}],
'notes': 'notes',
'networkVlans': [{'networkSpace': 'PUBLIC',
'vlanNumber': 23,
'id': 1}],
'dedicatedHost': {'id': 37401},
'transientGuestFlag': False,
'operatingSystem': {
'passwords': [{'username': 'user', 'password': 'pass'}],
'softwareLicense': {
'softwareDescription': {'version': '12.04-64 Minimal for VSI',
'name': 'Ubuntu'}}
},
'softwareComponents': [{
'passwords': [{'username': 'user', 'password': 'pass'}],
'softwareLicense': {
'softwareDescription': {'name': 'Ubuntu'}}
}],
'tagReferences': [{'tag': {'name': 'production'}}],
}
getCreateObjectOptions = {
'flavors': [
{
'flavor': {
'keyName': 'B1_1X2X25'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'B1_1X2X25'
}
}
},
{
'flavor': {
'keyName': 'B1_1X2X25_TRANSIENT'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'B1_1X2X25_TRANSIENT'
},
'transientGuestFlag': True
}
},
{
'flavor': {
'keyName': 'B1_1X2X100'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'B1_1X2X100'
}
}
},
{
'flavor': {
'keyName': 'BL1_1X2X100'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'BL1_1X2X100'
}
}
},
{
'flavor': {
'keyName': 'BL2_1X2X100'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'BL2_1X2X100'
}
}
},
{
'flavor': {
'keyName': 'C1_1X2X25'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'C1_1X2X25'
}
}
},
{
'flavor': {
'keyName': 'M1_1X2X100'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'M1_1X2X100'
}
}
},
{
'flavor': {
'keyName': 'AC1_1X2X100'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'AC1_1X2X100'
}
}
},
{
'flavor': {
'keyName': 'ACL1_1X2X100'
},
'template': {
'supplementalCreateObjectOptions': {
'flavorKeyName': 'ACL1_1X2X100'
}
}
},
],
'processors': [
{
'itemPrice': {
'item': {'description': '1 x 2.0 GHz Core'},
'hourlyRecurringFee': '.07',
'recurringFee': '29'
},
'template': {'startCpus': 1}
},
{
'itemPrice': {
'item': {'description': '2 x 2.0 GHz Cores'},
'hourlyRecurringFee': '.14',
'recurringFee': '78'
},
'template': {'startCpus': 2}
},
{
'itemPrice': {
'item': {'description': '3 x 2.0 GHz Cores'},
'hourlyRecurringFee': '.205',
'recurringFee': '123.5'
},
'template': {'startCpus': 3}
},
{
'itemPrice': {
'item': {'description': '4 x 2.0 GHz Cores'},
'hourlyRecurringFee': '.265',
'recurringFee': '165.5'
},
'template': {'startCpus': 4}
},
{
'itemPrice': {
'hourlyRecurringFee': '.209',
'recurringFee': '139',
'dedicatedHostInstanceFlag': False,
'item': {
'description': '1 x 2.0 GHz Cores (Dedicated)'
}
},
'template': {
'dedicatedAccountHostOnlyFlag': True,
'startCpus': 1
}
},
{
'itemPrice': {
'hourlyRecurringFee': '0',
'recurringFee': '0',
'dedicatedHostInstanceFlag': True,
'item': {
'description': '56 x 2.0 GHz Cores (Dedicated Host)'
}
},
'template': {
'startCpus': 56,
'dedicatedHost': {
'id': None
}
}
},
{
'itemPrice': {
'hourlyRecurringFee': '0',
'recurringFee': '0',
'dedicatedHostInstanceFlag': True,
'item': {
'description': '4 x 2.0 GHz Cores (Dedicated Host)'
}
},
'template': {
'startCpus': 4,
'dedicatedHost': {
'id': None
}
}
},
],
'memory': [
{
'itemPrice': {
'item': {'description': '1 GB'},
'hourlyRecurringFee': '.03',
'recurringFee': '21'
},
'template': {'maxMemory': 1024}
},
{
'itemPrice': {
'item': {'description': '2 GB'},
'hourlyRecurringFee': '.06',
'recurringFee': '42'
},
'template': {'maxMemory': 2048}
},
{
'itemPrice': {
'item': {'description': '3 GB'},
'hourlyRecurringFee': '.085',
'recurringFee': '59.5'},
'template': {'maxMemory': 3072}
},
{
'itemPrice': {
'item': {'description': '4 GB'},
'hourlyRecurringFee': '.11',
'recurringFee': '77'
},
'template': {'maxMemory': 4096}
},
{
'itemPrice': {
'hourlyRecurringFee': '0',
'recurringFee': '0',
'dedicatedHostInstanceFlag': True,
'item': {
'description': '64 GB (Dedicated Host)'
}
},
'template': {
'maxMemory': 65536
}
},
{
'itemPrice': {
'hourlyRecurringFee': '0',
'recurringFee': '0',
'dedicatedHostInstanceFlag': True,
'item': {
'description': '8 GB (Dedicated Host)'
}
},
'template': {
'maxMemory': 8192
}
},
],
'blockDevices': [
{
'itemPrice': {
'item': {'description': '25 GB (LOCAL)'},
'hourlyRecurringFee': '0',
'recurringFee': '0'},
'template': {
'blockDevices': [
{'device': '0', 'diskImage': {'capacity': 25}}
],
'localDiskFlag': True
}
},
{
'itemPrice': {
'item': {'description': '100 GB (LOCAL)'},
'hourlyRecurringFee': '.01',
'recurringFee': '7'
},
'template': {
'blockDevices': [
{'device': '0', 'diskImage': {'capacity': 100}}
],
'localDiskFlag': True
}
},
],
'operatingSystems': [
{
'itemPrice': {
'item': {
'description': 'CentOS 6.0 - Minimal Install (64 bit)'
},
'hourlyRecurringFee': '0',
'recurringFee': '0'
},
'template': {
'operatingSystemReferenceCode': 'CENTOS_6_64'
}
},
{
'itemPrice': {
'item': {
'description': 'Debian GNU/Linux 7.0 Wheezy/Stable -'
' Minimal Install (64 bit)'
},
'hourlyRecurringFee': '0',
'recurringFee': '0'
},
'template': {
'operatingSystemReferenceCode': 'DEBIAN_7_64'
}
},
{
'itemPrice': {
'item': {
'description': 'Ubuntu Linux 12.04 LTS Precise'
' Pangolin - Minimal Install (64 bit)'
},
'hourlyRecurringFee': '0',
'recurringFee': '0'
},
'template': {
'operatingSystemReferenceCode': 'UBUNTU_12_64'
}
},
],
'networkComponents': [
{
'itemPrice': {
'item': {
'description': '10 Mbps Public & Private Networks'
},
'hourlyRecurringFee': '0',
'recurringFee': '0'},
'template': {
'networkComponents': [{'maxSpeed': 10}]
}
},
{
'itemPrice': {
'item': {'description': '100 Mbps Private Network'},
'hourlyRecurringFee': '0',
'recurringFee': '0'},
'template': {
'networkComponents': [{'maxSpeed': 100}]
}
},
{
'itemPrice': {
'item': {'description': '1 Gbps Private Network'},
'hourlyRecurringFee': '.02',
'recurringFee': '10'
},
'template': {
'networkComponents': [{'maxSpeed': 1000}]
}
},
{
'itemPrice': {
'hourlyRecurringFee': '0',
'recurringFee': '0',
'dedicatedHostInstanceFlag': True,
'item': {
'description': '1 Gbps Public & Private Network Uplinks (Dedicated Host)'
}
},
'template': {
'networkComponents': [
{
'maxSpeed': 1000
}
],
'privateNetworkOnlyFlag': False
}
},
],
'datacenters': [
{'template': {'datacenter': {'name': 'ams01'}}},
{'template': {'datacenter': {'name': 'dal05'}}},
],
}
getReverseDomainRecords = [{
'networkAddress': '12.34.56.78',
'name': '12.34.56.78.in-addr.arpa',
'resourceRecords': [{'data': 'test.softlayer.com.', 'id': 987654}],
'updateDate': '2013-09-11T14:36:57-07:00',
'serial': 1234665663,
'id': 123456,
}]
editObject = True
deleteObject = True
setPrivateNetworkInterfaceSpeed = True
setPublicNetworkInterfaceSpeed = True
createObject = getObject
createObjects = [getObject]
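# Note (added commentary, not part of the upstream fixture data): the module-level
# names above and below mirror SoftLayer_Virtual_Guest API methods. The test
# transport is assumed to resolve a mocked API call by attribute name, e.g. a
# mocked Virtual_Guest.getObject() call returns the getObject dict defined above.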
generateOrderTemplate = {
"imageTemplateId": None,
"location": "1854895",
"packageId": 835,
"presetId": 405,
"prices": [
{
"hourlyRecurringFee": "0",
"id": 45466,
"recurringFee": "0",
"item": {
"description": "CentOS 7.x - Minimal Install (64 bit)"
}
},
{
"hourlyRecurringFee": "0",
"id": 2202,
"recurringFee": "0",
"item": {
"description": "25 GB (SAN)"
}
},
{
"hourlyRecurringFee": "0",
"id": 905,
"recurringFee": "0",
"item": {
"description": "Reboot / Remote Console"
}
},
{
"hourlyRecurringFee": ".02",
"id": 899,
"recurringFee": "10",
"item": {
"description": "1 Gbps Private Network Uplink"
}
},
{
"hourlyRecurringFee": "0",
"id": 1800,
"item": {
"description": "0 GB Bandwidth Allotment"
}
},
{
"hourlyRecurringFee": "0",
"id": 21,
"recurringFee": "0",
"item": {
"description": "1 IP Address"
}
},
{
"hourlyRecurringFee": "0",
"id": 55,
"recurringFee": "0",
"item": {
"description": "Host Ping"
}
},
{
"hourlyRecurringFee": "0",
"id": 57,
"recurringFee": "0",
"item": {
"description": "Email and Ticket"
}
},
{
"hourlyRecurringFee": "0",
"id": 58,
"recurringFee": "0",
"item": {
"description": "Automated Notification"
}
},
{
"hourlyRecurringFee": "0",
"id": 420,
"recurringFee": "0",
"item": {
"description": "Unlimited SSL VPN Users & 1 PPTP VPN User per account"
}
},
{
"hourlyRecurringFee": "0",
"id": 418,
"recurringFee": "0",
"item": {
"description": "Nessus Vulnerability Assessment & Reporting"
}
}
],
"quantity": 1,
"sourceVirtualGuestId": None,
"sshKeys": [],
"useHourlyPricing": True,
"virtualGuests": [
{
"domain": "test.local",
"hostname": "test"
}
],
"complexType": "SoftLayer_Container_Product_Order_Virtual_Guest"
}
setUserMetadata = ['meta']
reloadOperatingSystem = 'OK'
setTags = True
createArchiveTransaction = {
'createDate': '2018-12-10T17:29:18-06:00',
'elapsedSeconds': 0,
'guestId': 12345678,
'hardwareId': None,
'id': 12345,
'modifyDate': '2018-12-10T17:29:18-06:00',
'statusChangeDate': '2018-12-10T17:29:18-06:00'
}
executeRescueLayer = True
getUpgradeItemPrices = [
{
'id': 1007,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}],
'item': {
'capacity': '4',
'units': 'PRIVATE_CORE',
'description': 'Computing Instance (Dedicated)',
}
},
{
'id': 1144,
'locationGroupId': None,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}],
'item': {
'capacity': '4',
'units': 'CORE',
'description': 'Computing Instance',
}
},
{
'id': 332211,
'locationGroupId': 1,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}],
'item': {
'capacity': '4',
'units': 'CORE',
'description': 'Computing Instance',
}
},
{
'id': 1122,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}],
'item': {
'capacity': '1000',
'description': 'Public & Private Networks',
}
},
{
'id': 1144,
'categories': [{'id': 26,
'name': 'Uplink Port Speeds',
'categoryCode': 'port_speed'}],
'item': {
'capacity': '1000',
'description': 'Private Networks',
}
},
{
'id': 1133,
'categories': [{'id': 3,
'name': 'RAM',
'categoryCode': 'ram'}],
'item': {
'capacity': '2',
'description': 'RAM',
}
},
]
DEDICATED_GET_UPGRADE_ITEM_PRICES = [
{
'id': 115566,
'categories': [{'id': 80,
'name': 'Computing Instance',
'categoryCode': 'guest_core'}],
'item': {
'capacity': '4',
'units': 'DEDICATED_CORE',
'description': 'Computing Instance (Dedicated Host)',
}
},
]
getMetricTrackingObjectId = 1000
getBandwidthAllotmentDetail = {
'allocationId': 25465663,
'bandwidthAllotmentId': 138442,
'effectiveDate': '2019-04-03T23:00:00-06:00',
'endEffectiveDate': None,
'id': 25888247,
'serviceProviderId': 1,
'allocation': {
'amount': '250'
}
}
getBillingCycleBandwidthUsage = [
{
'amountIn': '.448',
'amountOut': '.52157',
'type': {
'alias': 'PUBLIC_SERVER_BW'
}
},
{
'amountIn': '.03842',
'amountOut': '.01822',
'type': {
'alias': 'PRIVATE_SERVER_BW'
}
}
]
| mit | -4,988,771,412,386,916,000 | 27.619048 | 93 | 0.391275 | false |
d120/pyfeedback | src/feedback/models/fragebogen2016.py | 1 | 14676 | # coding=utf-8
from django.db import models
from feedback.models import Fragebogen, Ergebnis
class Fragebogen2016(Fragebogen):
fach = models.CharField(max_length=5, choices=Fragebogen.FACH_CHOICES, blank=True)
abschluss = models.CharField(max_length=5, choices=Fragebogen.ABSCHLUSS_CHOICES, blank=True)
semester = models.CharField(max_length=4, choices=Fragebogen.SEMESTER_CHOICES16, blank=True)
geschlecht = models.CharField(max_length=1, choices=Fragebogen.GESCHLECHT_CHOICES, blank=True)
studienberechtigung = models.CharField(max_length=1, choices=Fragebogen.STUDIENBERECHTIGUNG_CHOICES, blank=True)
pflichveranstaltung = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
male_veranstaltung_gehoert = models.CharField(max_length=1, choices=Fragebogen.VERANSTALTUNG_GEHOERT, blank=True)
pruefung_angetreten = models.CharField(max_length=1, choices=Fragebogen.KLAUSUR_ANGETRETEN, blank=True)
v_wie_oft_besucht = models.PositiveSmallIntegerField(blank=True, null=True)
v_besuch_ueberschneidung = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
v_besuch_qualitaet = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
v_besuch_verhaeltnisse = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
v_besuch_privat = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
v_besuch_elearning = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
v_besuch_zufrueh = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
v_besuch_sonstiges = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
v_3_1 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_2 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_3 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_4 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_5 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_6 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_7 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_8 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_9 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_10 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_11 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_12 = models.PositiveSmallIntegerField(blank=True, null=True)
v_3_13 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_1 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_2 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_3 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_4 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_5 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_6 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_7 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_8 = models.PositiveSmallIntegerField(blank=True, null=True)
v_4_9 = models.PositiveSmallIntegerField(blank=True, null=True)
v_5_1 = models.PositiveSmallIntegerField(blank=True, null=True)
v_5_2 = models.PositiveSmallIntegerField(blank=True, null=True)
v_6_1 = models.CharField(max_length=1, choices=Fragebogen.STUNDEN_NACHBEARBEITUNG, blank=True)
v_6_2 = models.CharField(max_length=3, blank=True)
v_6_3 = models.PositiveSmallIntegerField(blank=True, null=True)
v_6_4 = models.PositiveSmallIntegerField(blank=True, null=True)
v_6_5 = models.PositiveSmallIntegerField(blank=True, null=True)
v_6_8 = models.CharField(max_length=1, choices=Fragebogen.BOOLEAN_CHOICES, blank=True)
class Meta:
verbose_name = 'Fragebogen 2016'
verbose_name_plural = 'Fragebögen 2016'
ordering = ['semester', 'veranstaltung']
app_label = 'feedback'
class Ergebnis2016(Ergebnis):
parts_vl = [
['v_6_5', 'Vorlesung: Gesamtnote',
['6.5 Welche Gesamtnote würdest Du der Vorlesung (ohne Übungen) geben?']],
['v_didaktik', 'Vorlesung: Didaktik',
['3.3 Die Lernziele der Veranstaltung sind mir klar geworden.',
'3.4 Der Stoff wurde anhand von Beispielen verdeutlicht.',
'3.9 Ich habe durch diese Veranstaltung viel gelernt.',
'3.10 Mein Vorwissen war ausreichend, um der Vorlesung folgen zu können.',
'3.11 Ich kann abschätzen, was in der Prüfung von mir erwartet wird.',
'4.1 Die Lehrkraft hat Kompliziertes verständlich dargelegt.',
'4.3 Die Lehrkraft hat die Vorlesung rhetorisch gut gestaltet.',
'4.4 Die Lehrkraft hat die Vorlesung didaktisch gut gestaltet.',
'4.6 Der Lehrende regte gezielt zur eigenen Mitarbeit / zum Mitdenken in der Vorlesung an.',
'4.7 Die Lehrkraft hat elektronische Plattformen sinnvoll und hilfreich eingesetzt.']],
['v_organisation', 'Vorlesung: Organisation',
['3.1 Die Vorlesung war inhaltlich gut strukturiert, ein roter Faden war erkennbar.',
'3.2 Die Organisation der Vorlesung war gut.',
'3.6 Die (Zwischen-)Fragen der Studierenden wurden angemessen beantwortet.',
'4.2 Die Lehrkraft zeigte sich gut vorbereitet.',
'4.5 Der Lehrende war auch außerhalb der Vorlesung ansprechbar.',
'4.8 Die Sprachkenntnisse der Lehrkraft in der Vorlesungssprache waren gut.',
'4.9 Die Lehrkraft hielt die Vorlesung größtenteils selbst.']],
['v_praxisbezug_motivation', 'Vorlesung: Praxisbezug und Motivation',
['3.5 Der Bezug zwischen Theorie und praktischem Arbeiten / praktischen Anwendungen wurde hergestellt.',
'3.8 Die Vorlesung motivierte dazu, sich außerhalb der Veranstaltung selbstständig mit den behandelten Themen auseinanderzusetzen.']],
]
parts_ue = [
['ue_didaktik', 'Übung: Didaktik',
['4.1 Die Übung war inhaltlich gut strukturiert.',
'4.2 Die Lernziele der Übung sind mir klar geworden.',
'5.2 Der*Die Tutor*in hat gut und verständlich erklärt.',
'5.3 Der*Die Tutor*in hat die Gruppe motiviert.',
'5.4 Der*Die Tutor*in war fachlich kompetent.',
'5.5 Der*Die Tutor*in zeigte sich gut vorbereitet.',
'5.6 Der*Die Tutor*in hat die Übungstunde gut strukturiert.',
'5.7 Der*Die Tutor*in war engagiert.',
'5.8 Der*Die Tutor*in stellte wesentliche Punkte zur Bearbeitung der Aufgaben vor.',
'5.9 Der*Die Tutor*in regte mich gezielt zum Mitdenken und zu eigener Mitarbeit an.',
'5.10 Der*Die Tutor*in setzte verfügbare Medien (z. B. Tafel, Projektor, Beamer) sinnvoll ein.',
'5.11 Der*Die Tutor*in hat elektronische Plattformen sinnvoll und hilfreich eingesetzt.',
'5.15 Der*Die Tutor*in hat konstruktives bzw. gutes Feedback gegeben.']],
['ue_organisation', 'Übung: Organisation',
['3.3 Die Aufgabenstellungen waren verständlich.',
'3.4 Die Übungsaufgaben hatten inhaltlich eine klare Struktur.',
'3.5 Die Übungsaufgaben waren motivierend.',
'3.6 Es wurden ausreichend Lösungsvorschläge bereitgestellt bzw. präsentiert.',
'3.7 Der Stoff der Vorlesung war gut auf die Übungen abgestimmt.',
'3.8 Mein Vorwissen war ausreichend, um die Übungsaufgaben bearbeiten zu können.',
'4.3 Die Organisation des Übungsbetriebs war gut.',
'4.4 Es wurde genug Übungsmaterial (Aufgaben, etc.) zur Verfügung gestellt.',
'4.5 Es stand genug Zeit für die Bearbeitung der Aufgaben zur Verfügung.',
'4.6 Die Abgaben waren gut vereinbar mit anderen Veranstaltungen laut Regelstudienplan.']],
['ue_arbeitsbedingungen', 'Übung: Arbeitsbedingungen',
['4.7 Die Auswahlmöglichkeiten der Termine waren angemessen bzw. der Übungszeitpunkt war passend.',
'4.8 Die Gruppengröße war zufriedenstellend.',
'4.9 Der Raum für die Übungen war zum Arbeiten und Lernen geeignet.']],
['ue_umgang', 'Übung: Umgang',
['5.12 Der*Die Tutor*in erschien pünktlich.',
'5.13 Der*Die Tutor*in behandelte alle Studierenden respektvoll.',
'5.14 Der*Die Tutor*in teilte die Zeit zwischen den Studierenden angemessen auf.',
'5.16 Der*Die Tutor*in hat nachvollziehbar bewertet bzw. benotet.']],
['ue_lernerfolg', 'Übung: Lernerfolg',
['3.1 Durch die Aufgaben und den Übungsbetrieb habe ich viel gelernt.',
'3.2 Die Übungen haben mir geholfen, den Stoff der Vorlesung besser zu verstehen.']],
]
parts = parts_vl + parts_ue
hidden_parts = [
['v_feedbackpreis', 'Feedbackpreis: Beste Vorlesung',
['2.4 Die Vorlesung war inhaltlich gut strukturiert, ein roter Faden war erkennbar.',
'2.5 Die Lernziele der Veranstaltung sind mir klar geworden.',
'2.6 Die Lehrkraft hat Kompliziertes verständlich dargelegt.',
'2.7 Der Stoff wurde anhand von Beispielen verdeutlicht.',
'2.8 Die Lehrkraft zeigte Bezüge zur aktuellen Forschung auf.',
'2.9 Der Bezug zwischen Theorie und praktischem Arbeiten / praktischen Anwendungen wurde hergestellt.',
'2.10 Das Tempo der Vorlesung war angemessen.',
'2.11 Die Lehrkraft zeigte sich gut vorbereitet.',
'2.12 Die (Zwischen-)Fragen der Studierenden wurden angemessen beantwortet.',
'2.13 Der Lehrende war auch außerhalb der Veranstaltung ansprechbar.',
'2.14 Der Lehrende regte gezielt zur eigenen Mitarbeit / zum Mitdenken in der Veranstaltung an.',
'3.8 Die Vorlesung motivierte dazu, sich außerhalb der Veranstaltungselbstständig mit den behandelten Themen auseinander zu setzen.',
'3.7 Die Vorlesungsmaterialien (Folien, Skripte, Tafelanschrieb, Lehrbücher,e-Learning, etc.) haben das Lernen wirkungsvoll unterstützt.',
'6.5 Welche Gesamtnote würdest Du der Vorlesung (ohne Übungen) geben?']],
['ue_feedbackpreis', 'Feedbackpreis: Beste Übung',
['3.1 Durch die Aufgaben und den Übungsbetrieb habe ich viel gelernt.',
'3.2 Die Übungen haben mir geholfen, den Stoff der Vorlesung besser zu verstehen.',
'3.3 Die Aufgabenstellungen waren verständlich.',
'3.4 Die Übungsaufgaben hatten inhaltlich eine klare Struktur.',
'3.5 Die Übungsaufgaben waren motivierend.',
'3.7 Der Stoff der Vorlesung war gut auf die Übungen abgestimmt.',
'4.1 Die Übung war inhaltlich gut strukturiert.',
'4.2 Die Lernziele der Übung sind mir klar geworden.',
'4.3 Die Organisation des Übungsbetriebs war gut.',
'4.4 Es wurde genug Übungsmaterial (Aufgaben, etc.) zur Verfügung gestellt.',
'4.5 Es stand genug Zeit für die Bearbeitung der Aufgaben zur Verfügung.',
'6.3 Welche Gesamtnote gibst du der Übung?']],
]
weight = {
'v_feedbackpreis': [1] * 13 + [13],
'ue_feedbackpreis': [1] * 10 + [10],
}
#TODO: decimal statt float benutzen
v_didaktik = models.FloatField(blank=True, null=True)
v_didaktik_count = models.PositiveIntegerField(default=0)
v_didaktik_parts = ['v_3_3', 'v_3_4', 'v_3_9', 'v_3_10', 'v_4_1', 'v_4_3', 'v_4_4', 'v_4_6', 'v_4_7']
v_organisation = models.FloatField(blank=True, null=True)
v_organisation_count = models.PositiveIntegerField(default=0)
v_organisation_parts = ['v_3_1', 'v_3_2', 'v_3_6', 'v_4_2', 'v_4_5', 'v_4_7', 'v_4_8', 'v_4_9']
v_praxisbezug_motivation = models.FloatField(blank=True, null=True)
v_praxisbezug_motivation_count = models.PositiveIntegerField(default=0)
v_praxisbezug_motivation_parts = ['v_3_5', 'v_4_8']
v_6_5 = models.FloatField(blank=True, null=True)
v_6_5_count = models.PositiveIntegerField(default=0)
v_feedbackpreis = models.FloatField(blank=True, null=True)
v_feedbackpreis_count = models.PositiveIntegerField(default=0)
v_feedbackpreis_parts = ['v_3_1', 'v_3_2', 'v_3_3', 'v_3_4', 'v_3_5', 'v_3_6', 'v_3_7', 'v_3_8', 'v_3_9', 'v_4_1', 'v_4_2', 'v_4_3', 'v_4_4',
'v_4_5', 'v_4_6', 'v_4_9', 'v_6_2', 'v_6_5', 'v_gesamt']
ue_didaktik = models.FloatField(blank=True, null=True)
ue_didaktik_count = models.PositiveIntegerField(default=0)
ue_didaktik_parts = ['ue_4_1', 'ue_4_2', 'ue_5_2', 'ue_5_3', 'ue_5_4', 'ue_5_5', 'ue_5_6', 'ue_5_7', 'ue_5_8', 'ue_5_9', 'ue_5_10', 'ue_5_11', 'ue_5_15']
ue_organisation = models.FloatField(blank=True, null=True)
ue_organisation_count = models.PositiveIntegerField(default=0)
ue_organisation_parts = ['ue_3_3', 'ue_3_4', 'ue_3_5', 'ue_3_6', 'ue_3_7', 'ue_3_8', 'ue_4_3', 'ue_4_4', 'ue_4_5', 'ue_4_6']
ue_arbeitsbedingungen = models.FloatField(blank=True, null=True)
ue_arbeitsbedingungen_count = models.PositiveIntegerField(default=0)
ue_arbeitsbedingungen_parts = ['ue_4_7', 'ue_4_8', 'ue_4_9']
ue_umgang = models.FloatField(blank=True, null=True)
ue_umgang_count = models.PositiveIntegerField(default=0)
ue_umgang_parts = ['ue_5_12', 'ue_5_13', 'ue_5_14', 'ue_5_16']
ue_lernerfolg = models.FloatField(blank=True, null=True)
ue_lernerfolg_count = models.PositiveIntegerField(default=0)
ue_lernerfolg_parts = ['ue_3_1', 'ue_3_2']
ue_feedbackpreis = models.FloatField(blank=True, null=True)
ue_feedbackpreis_count = models.PositiveIntegerField(default=0)
ue_feedbackpreis_parts = ['ue_3_1', 'ue_3_2', 'ue_3_3', 'ue_3_4', 'ue_3_5', 'ue_3_7', 'ue_4_1', 'ue_4_2', 'ue_4_3', 'ue_4_4', 'ue_4_5', 'ue_6_3']
gesamt = models.FloatField(blank=True, null=True)
gesamt_count = models.PositiveIntegerField(default=0)
class Meta:
verbose_name = 'Ergebnis 2016'
verbose_name_plural = 'Ergebnisse 2016'
ordering = ['veranstaltung']
app_label = 'feedback'
| agpl-3.0 | 4,768,601,658,121,352,000 | 66 | 157 | 0.658291 | false |
littley/network_cjl | network_cjl/ReceiveRequest.py | 1 | 1833 | import time
class ReceiveRequest(object):
"""
A ReceiveRequest is generated every time the first packet from a message is received. The ReceiveRequest
then keeps track of all of the message's packets that have already ben received.
"""
def __init__(self, inital_packet, (host, port)):
self.packets = set()
self.total_packets = inital_packet.total_packets
self.sequence_number = inital_packet.sequence_number
self.hash = inital_packet.hash
self.start_time = time.time()
self.register_packet(inital_packet)
self.host = host
self.port = port
def complete(self):
"""
Returns True if this receive request has received all of its required packets
        :return: True once every packet of the message has been registered
"""
return len(self.packets) == self.total_packets
def get_payload(self):
"""
        Call this once the request is complete. Returns the original payload.
"""
self.packets = list(self.packets)
self.packets = sorted(self.packets, key=lambda pkt: pkt.packet_number)
payload = []
for packet in self.packets:
payload.append(packet.payload)
return ''.join(payload)
def owns_packet(self, packet):
"""
This function returns True if this ReceiveRequest corresponds to the given packet
:param packet: a Packet
"""
return self.hash == packet.hash \
and self.sequence_number == packet.sequence_number
def register_packet(self, packet):
self.packets.add(packet)
def packets_observed(self):
return len(self.packets)
def __eq__(self, other):
return self.sequence_number == other.sequence_number \
and self.hash == other.hash
def __hash__(self):
return hash(self.hash)
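# Illustrative usage (a sketch, not part of the original module). Packet objects
# with hash, sequence_number, total_packets, packet_number and payload attributes
# are assumed, matching the attributes accessed above:
#
#   request = ReceiveRequest(first_packet, (host, port))
#   for packet in later_packets:
#       if request.owns_packet(packet):
#           request.register_packet(packet)
#   if request.complete():
#       data = request.get_payload()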
| apache-2.0 | -5,376,255,074,754,944,000 | 29.55 | 109 | 0.61593 | false |
DedMemez/ODS-August-2017 | golf/GolfGlobals.py | 1 | 13007 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.golf.GolfGlobals
from direct.directnotify import DirectNotifyGlobal
import random
MAX_PLAYERS_PER_HOLE = 4
GOLF_BALL_RADIUS = 0.25
GOLF_BALL_VOLUME = 4.0 / 3.0 * 3.14159 * GOLF_BALL_RADIUS ** 3
GOLF_BALL_MASS = 0.5
GOLF_BALL_DENSITY = GOLF_BALL_MASS / GOLF_BALL_VOLUME
GRASS_SURFACE = 0
BALL_SURFACE = 1
HARD_SURFACE = 2
HOLE_SURFACE = 3
SLICK_SURFACE = 4
OOB_RAY_COLLIDE_ID = -1
GRASS_COLLIDE_ID = 2
HARD_COLLIDE_ID = 3
TOON_RAY_COLLIDE_ID = 4
MOVER_COLLIDE_ID = 7
WINDMILL_BASE_COLLIDE_ID = 8
CAMERA_RAY_COLLIDE_ID = 10
BALL_COLLIDE_ID = 42
HOLE_CUP_COLLIDE_ID = 64
SKY_RAY_COLLIDE_ID = 78
SLICK_COLLIDE_ID = 13
BALL_CONTACT_FRAME = 9
BALL_CONTACT_TIME = (BALL_CONTACT_FRAME + 1) / 24.0
AIM_DURATION = 60
TEE_DURATION = 15
RANDOM_HOLES = True
KICKOUT_SWINGS = 2
TIME_TIE_BREAKER = True
CourseInfo = {0: {'name': '',
'numHoles': 3,
'holeIds': (2, 3, 4, 5, 6, 7, 8, 12, 13, 15, 16)},
1: {'name': '',
'numHoles': 6,
'holeIds': ((0, 5),
(1, 5),
2,
3,
4,
5,
6,
7,
8,
9,
10,
(11, 5),
12,
13,
(14, 5),
15,
16,
(17, 5),
(20, 5),
(21, 5),
(22, 5),
(23, 5),
(24, 5),
(25, 5),
(26, 5),
(28, 5),
(30, 5),
(31, 5),
(33, 5),
(34, 5))},
2: {'name': '',
'numHoles': 9,
'holeIds': ((1, 5),
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
(14, 5),
15,
(17, 5),
(18, 20),
(19, 20),
(20, 20),
(21, 5),
(22, 5),
(23, 20),
(24, 20),
(25, 20),
(26, 20),
(27, 20),
(28, 20),
(29, 20),
(30, 5),
(31, 20),
(32, 20),
(33, 5),
(34, 20),
(35, 20))}}
HoleInfo = {0: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole18',
'physicsData': 'golfGreen18',
'blockers': (),
'optionalMovers': ()},
1: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole1',
'physicsData': 'golfGreen1',
'blockers': ()},
2: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole2',
'physicsData': 'golfGreen2',
'blockers': ()},
3: {'name': '',
'par': 2,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole3',
'physicsData': 'golfGreen3',
'blockers': ()},
4: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole4',
'physicsData': 'golfGreen4',
'blockers': ()},
5: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole5',
'physicsData': 'golfGreen2',
'blockers': ()},
6: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole6',
'physicsData': 'golfGreen6',
'blockers': ()},
7: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole7',
'physicsData': 'golfGreen7',
'blockers': ()},
8: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole8',
'physicsData': 'golfGreen8',
'blockers': ()},
9: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole9',
'physicsData': 'golfGreen9',
'blockers': 2},
10: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole10',
'physicsData': 'golfGreen10',
'blockers': ()},
11: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole11',
'physicsData': 'golfGreen11',
'blockers': ()},
12: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole12',
'physicsData': 'golfGreen12',
'blockers': ()},
13: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole13',
'physicsData': 'golfGreen13',
'blockers': ()},
14: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole14',
'physicsData': 'golfGreen14',
'blockers': ()},
15: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole15',
'physicsData': 'golfGreen15',
'blockers': ()},
16: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole16',
'physicsData': 'golfGreen16',
'blockers': ()},
17: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole17',
'physicsData': 'golfGreen17',
'blockers': ()},
18: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole18',
'physicsData': 'golfGreen18',
'blockers': (1, 2),
'optionalMovers': 1},
19: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole1',
'physicsData': 'golfGreen1',
'blockers': (2, 5)},
20: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole2',
'physicsData': 'golfGreen2',
'blockers': (1, 3)},
21: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole3',
'physicsData': 'golfGreen3',
'blockers': (1, 2, 3)},
22: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole4',
'physicsData': 'golfGreen4',
'blockers': 2},
23: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole5',
'physicsData': 'golfGreen5',
'blockers': (3, 4),
'optionalMovers': 1},
24: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole6',
'physicsData': 'golfGreen6',
'blockers': 1,
'optionalMovers': 1},
25: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole7',
'physicsData': 'golfGreen7',
'blockers': 3,
'optionalMovers': 1},
26: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole8',
'physicsData': 'golfGreen8',
'blockers': (),
'optionalMovers': 1},
27: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole9',
'physicsData': 'golfGreen9',
'blockers': (),
'optionalMovers': (1, 2)},
28: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole10',
'physicsData': 'golfGreen10',
'blockers': (),
'optionalMovers': (1, 2)},
29: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole11',
'physicsData': 'golfGreen11',
'blockers': (),
'optionalMovers': 1},
30: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole12',
'physicsData': 'golfGreen12',
'blockers': (1, 2, 3)},
31: {'name': '',
'par': 4,
'maxSwing': 7,
'terrainModel': 'phase_6/models/golf/hole13',
'physicsData': 'golfGreen13',
'blockers': (3, 4),
'optionalMovers': 1},
32: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole14',
'physicsData': 'golfGreen14',
'blockers': 1,
'optionalMovers': 1},
33: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole15',
'physicsData': 'golfGreen15',
'blockers': (1, 2, 3),
'optionalMovers': (1, 2)},
34: {'name': '',
'par': 3,
'maxSwing': 6,
'terrainModel': 'phase_6/models/golf/hole16',
'physicsData': 'golfGreen16',
'blockers': (1, 2, 5, 6),
'optionalMovers': 1},
35: {'name': '',
'par': 4,
'maxSwing': 7,
'terrainModel': 'phase_6/models/golf/hole17',
'physicsData': 'golfGreen17',
'blockers': (3, 4, 5)}}
for holeId in HoleInfo:
if type(HoleInfo[holeId]['blockers']) == type(0):
blockerNum = HoleInfo[holeId]['blockers']
HoleInfo[holeId]['blockers'] = (blockerNum,)
if HoleInfo[holeId].has_key('optionalMovers'):
if type(HoleInfo[holeId]['optionalMovers']) == type(0):
blockerNum = HoleInfo[holeId]['optionalMovers']
HoleInfo[holeId]['optionalMovers'] = (blockerNum,)
DistanceToBeInHole = 0.75
CoursesCompleted = 0
CoursesUnderPar = 1
HoleInOneShots = 2
EagleOrBetterShots = 3
BirdieOrBetterShots = 4
ParOrBetterShots = 5
MultiPlayerCoursesCompleted = 6
CourseZeroWins = 7
CourseOneWins = 8
CourseTwoWins = 9
TwoPlayerWins = 10
ThreePlayerWins = 11
FourPlayerWins = 12
MaxHistoryIndex = 9
NumHistory = MaxHistoryIndex + 1
CalcOtherHoleBest = False
CalcOtherCourseBest = False
TrophyRequirements = {CoursesCompleted: (6, 30, 60),
CoursesUnderPar: (1, 10, 50),
HoleInOneShots: (1, 10, 40),
EagleOrBetterShots: (1, 20, 50),
BirdieOrBetterShots: (1, 50, 100),
ParOrBetterShots: (1, 100, 150),
MultiPlayerCoursesCompleted: (10, 30, 60),
CourseZeroWins: (1, 10, 30),
CourseOneWins: (1, 10, 20),
CourseTwoWins: (1, 5, 10)}
PlayerColors = [(0.925, 0.168, 0.168, 1),
(0.13, 0.59, 0.973, 1),
(0.973, 0.809, 0.129, 1),
(0.598, 0.402, 0.875, 1)]
KartColors = [[[0, 50], [90, 255], [0, 85]], [[160, 255], [-15, 15], [0, 120]], [[160, 255], [0, 110], [0, 110]]]
NumTrophies = 0
for key in TrophyRequirements:
NumTrophies += len(TrophyRequirements[key])
NumCups = 3
TrophiesPerCup = NumTrophies / NumCups
def calcTrophyListFromHistory(history):
retval = []
historyIndex = 0
for trophyIndex in xrange(NumHistory):
requirements = TrophyRequirements[trophyIndex]
for amountNeeded in requirements:
if history[historyIndex] >= amountNeeded:
retval.append(True)
else:
retval.append(False)
historyIndex += 1
return retval
def calcCupListFromHistory(history):
retval = [False] * NumCups
trophyList = calcTrophyListFromHistory(history)
numTrophiesWon = 0
for gotTrophy in trophyList:
if gotTrophy:
numTrophiesWon += 1
for cupIndex in xrange(len(retval)):
threshold = (cupIndex + 1) * TrophiesPerCup
if threshold <= numTrophiesWon:
retval[cupIndex] = True
return retval
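# Worked example (illustrative): TrophyRequirements holds 10 categories with 3
# thresholds each, so NumTrophies == 30 and TrophiesPerCup == 10. A history that
# meets 12 of the 30 thresholds produces 12 True entries from
# calcTrophyListFromHistory, and calcCupListFromHistory then awards only the
# first cup (threshold 10 <= 12, but 20 > 12).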
def getCourseName(courseId):
from toontown.toonbase import TTLocalizer
if courseId in CourseInfo:
if not CourseInfo[courseId]['name']:
CourseInfo[courseId]['name'] = TTLocalizer.GolfCourseNames[courseId]
return CourseInfo[courseId]['name']
else:
return ''
def getHoleName(holeId):
from toontown.toonbase import TTLocalizer
if holeId in HoleInfo:
if not HoleInfo[holeId]['name']:
HoleInfo[holeId]['name'] = TTLocalizer.GolfHoleNames[holeId]
return HoleInfo[holeId]['name']
else:
return ''
def getHistoryIndexForTrophy(trophyIndex):
retval = -1
divBy3 = int(trophyIndex / 3)
if divBy3 < NumHistory:
retval = divBy3
return retval
def packGolfHoleBest(holeBest):
retval = []
shiftLeft = False
for hole in holeBest:
hole &= 15
if shiftLeft:
retval[-1] |= hole << 4
shiftLeft = False
else:
retval.append(hole)
shiftLeft = True
return retval
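# Worked example (illustrative): best scores are packed as 4-bit nibbles, two per
# byte, low nibble first. packGolfHoleBest([3, 5, 2]) -> [3 | (5 << 4), 2] == [83, 2].
# unpackGolfHoleBest (below) reverses this and yields [3, 5, 2, 0]; the trailing 0
# is padding that appears whenever the original list had odd length.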
def unpackGolfHoleBest(packedHoleBest):
retval = []
for packedHole in packedHoleBest:
lowbitHole = packedHole & 15
retval.append(lowbitHole)
highBitHole = (packedHole & 240) >> 4
retval.append(highBitHole)
return retval | apache-2.0 | -2,305,301,541,882,050,600 | 26.158009 | 113 | 0.496041 | false |
fluxer/spm | nuitka/nuitka/Builtins.py | 1 | 6584 | # Copyright 2016, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Built-ins module. Information about built-ins of the running Python.
"""
import functools
import sys
from types import BuiltinFunctionType, FunctionType, GeneratorType
from nuitka.__past__ import iterItems
from nuitka.PythonVersions import python_version
def _getBuiltinExceptionNames():
def isExceptionName(builtin_name):
if builtin_name.endswith("Error") or \
builtin_name.endswith("Exception"):
return True
elif builtin_name in ("StopIteration", "GeneratorExit", "SystemExit",
"NotImplemented", "KeyboardInterrupt",
"StopAsyncIteration"):
return True
else:
return False
# Hide Python3 changes for built-in exception names
try:
import exceptions
names = [
str(name) for name in dir(exceptions)
if isExceptionName(name)
]
values = {}
for key in names:
values[key] = getattr(exceptions, key)
for key in dir(sys.modules["__builtin__"]):
name = str(key)
if isExceptionName(name):
names.append(key)
values[name] = getattr(sys.modules["__builtin__"], key)
except ImportError:
exceptions = {}
for key, value in sys.modules["builtins"].__dict__.items():
if isExceptionName(key):
exceptions[key] = value
names = [
key for key, value in exceptions.items()
]
values = {}
for key, value in exceptions.items():
values[key] = value
return names, values
builtin_exception_names, builtin_exception_values = _getBuiltinExceptionNames()
# Just to make sure it's covering these cases correctly.
assert "TypeError" in builtin_exception_names
assert "ValueError" in builtin_exception_names
assert "StopIteration" in builtin_exception_names
assert "GeneratorExit" in builtin_exception_names
assert "AssertionError" in builtin_exception_names
assert "BaseException" in builtin_exception_names
assert "Exception" in builtin_exception_names
assert "NotImplemented" in builtin_exception_names
assert "StopAsyncIteration" in builtin_exception_names or python_version < 350
def _getBuiltinNames():
names = [
str(x)
for x in __builtins__.keys()
]
for builtin_exception_name in builtin_exception_names:
if builtin_exception_name in names:
names.remove(builtin_exception_name)
names.remove("__doc__")
names.remove("__name__")
names.remove("__package__")
warnings = []
for builtin_name in names:
if builtin_name.endswith("Warning"):
warnings.append(builtin_name)
for builtin_name in warnings:
names.remove(builtin_name)
return names, warnings
builtin_names, builtin_warnings = _getBuiltinNames()
assert "__import__" in builtin_names
assert "int" in builtin_names
assert "__doc__" not in builtin_names
assert "sys" not in builtin_names
builtin_all_names = builtin_names + builtin_exception_names + builtin_warnings
def getBuiltinTypeNames():
result = []
for builtin_name in builtin_names:
        if isinstance(__builtins__[builtin_name], type):
result.append(builtin_name)
return tuple(sorted(result))
builtin_type_names = getBuiltinTypeNames()
def _getAnonBuiltins():
with open(sys.executable) as any_file:
anon_names = {
# Strangely not Python3 types module
"NoneType" : type(None),
"ellipsis" : type(Ellipsis), # see above
"NotImplementedType" : type(NotImplemented),
"function" : FunctionType,
"builtin_function_or_method" : BuiltinFunctionType,
# Can't really have it any better way.
"compiled_function" : BuiltinFunctionType,
"generator" : GeneratorType,
"compiled_generator" : GeneratorType, # see above
"code" : type(_getAnonBuiltins.__code__),
"file" : type(any_file)
}
anon_codes = {
"NoneType" : "Py_TYPE( Py_None )",
"ellipsis" : "&PyEllipsis_Type",
"NotImplementedType" : "Py_TYPE( Py_NotImplemented )",
"function" : "&PyFunction_Type",
"builtin_function_or_method" : "&PyCFunction_Type",
"compiled_function" : "&Nuitka_Function_Type",
"compiled_generator" : "&Nuitka_Generator_Type",
"code" : "&PyCode_Type",
"file" : "&PyFile_Type"
}
if python_version < 300:
from types import ClassType, InstanceType, MethodType
anon_names["classobj"] = ClassType
anon_codes["classobj"] = "&PyClass_Type"
anon_names["instance"] = InstanceType
anon_codes["instance"] = "&PyInstance_Type"
anon_names["instancemethod"] = MethodType
anon_codes["instancemethod"] = "&PyMethod_Type"
return anon_names, anon_codes
builtin_anon_names, builtin_anon_codes = _getAnonBuiltins()
def calledWithBuiltinArgumentNamesDecorator(f):
""" Allow a function to be called with an "_arg" if a built-in name.
This avoids using built-in names in Nuitka source, while enforcing
a policy how to make them pretty.
"""
@functools.wraps(f)
def wrapper(*args, **kw):
new_kw = {}
for key, value in iterItems(kw):
if key in builtin_all_names:
key = key + "_arg"
new_kw[key] = value
return f(*args, **new_kw)
return wrapper
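# Illustrative effect (a sketch, not actual Nuitka usage): given
#
#   @calledWithBuiltinArgumentNamesDecorator
#   def addUsage(self, dict_arg): ...
#
# a caller may write addUsage(dict=value); the wrapper renames the "dict" keyword
# to "dict_arg" because "dict" appears in builtin_all_names.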
| gpl-2.0 | 8,219,563,310,531,405,000 | 30.806763 | 79 | 0.605559 | false |
seanbell/opensurfaces | server/intrinsic/algorithm/grosse2009/intrinsic.py | 1 | 10552 | import itertools
import numpy as np
import os
import png
import sys
import poisson
############################### Data ###########################################
def load_png(fname):
reader = png.Reader(fname)
w, h, pngdata, params = reader.read()
image = np.vstack(itertools.imap(np.uint16, pngdata))
if image.size == 3*w*h:
image = np.reshape(image, (h, w, 3))
return image.astype(float) / 255.
def load_object_helper(tag, condition):
    """Load an image of a given object as a NumPy array. The values that condition may take are:
'mask', 'original', 'diffuse', 'shading', 'reflectance', 'specular'
'shading' returns a grayscale image, and all the other options return color images."""
assert condition in ['mask', 'original', 'diffuse', 'shading', 'reflectance', 'specular']
obj_dir = os.path.join('data', tag)
if condition == 'mask':
filename = os.path.join(obj_dir, 'mask.png')
mask = load_png(filename)
return (mask > 0)
if condition == 'original':
filename = os.path.join(obj_dir, 'original.png')
return load_png(filename)
if condition == 'diffuse':
filename = os.path.join(obj_dir, 'diffuse.png')
return load_png(filename)
if condition == 'shading':
filename = os.path.join(obj_dir, 'shading.png')
return load_png(filename)
if condition == 'reflectance':
filename = os.path.join(obj_dir, 'reflectance.png')
return load_png(filename)
if condition == 'specular':
filename = os.path.join(obj_dir, 'specular.png')
return load_png(filename)
# cache for efficiency because PyPNG is pure Python
cache = {}
def load_object(tag, condition):
if (tag, condition) not in cache:
cache[tag, condition] = load_object_helper(tag, condition)
return cache[tag, condition]
def load_multiple(tag):
"""Load the images of a given object for all lighting conditions. Returns an
m x n x 3 x 10 NumPy array, where the third dimension is the color channel and
the fourth dimension is the image number."""
obj_dir = os.path.join('data', tag)
filename = os.path.join(obj_dir, 'light01.png')
img0 = load_png(filename)
result = np.zeros(img0.shape + (10,))
for i in range(10):
filename = os.path.join(obj_dir, 'light%02d.png' % (i+1))
result[:,:,:,i] = load_png(filename)
return result
############################# Error metric #####################################
def ssq_error(correct, estimate, mask):
"""Compute the sum-squared-error for an image, where the estimate is
multiplied by a scalar which minimizes the error. Sums over all pixels
where mask is True. If the inputs are color, each color channel can be
rescaled independently."""
assert correct.ndim == 2
if np.sum(estimate**2 * mask) > 1e-5:
alpha = np.sum(correct * estimate * mask) / np.sum(estimate**2 * mask)
else:
alpha = 0.
return np.sum(mask * (correct - alpha*estimate) ** 2)
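# Note on the rescaling above: alpha is the closed-form least-squares minimizer.
# Setting d/d(alpha) of sum(mask * (correct - alpha*estimate)**2) to zero gives
#   -2 * sum(mask * estimate * (correct - alpha*estimate)) = 0
#   => alpha = sum(mask * correct * estimate) / sum(mask * estimate**2)
# which is exactly the expression used when the denominator is non-negligible.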
def local_error(correct, estimate, mask, window_size, window_shift):
"""Returns the sum of the local sum-squared-errors, where the estimate may
be rescaled within each local region to minimize the error. The windows are
window_size x window_size, and they are spaced by window_shift."""
M, N = correct.shape[:2]
ssq = total = 0.
for i in range(0, M - window_size + 1, window_shift):
for j in range(0, N - window_size + 1, window_shift):
correct_curr = correct[i:i+window_size, j:j+window_size]
estimate_curr = estimate[i:i+window_size, j:j+window_size]
mask_curr = mask[i:i+window_size, j:j+window_size]
ssq += ssq_error(correct_curr, estimate_curr, mask_curr)
total += np.sum(mask_curr * correct_curr**2)
    assert not np.isnan(ssq / total)
return ssq / total
def score_image(true_shading, true_refl, estimate_shading, estimate_refl, mask, window_size=20):
return 0.5 * local_error(true_shading, estimate_shading, mask, window_size, window_size//2) + \
0.5 * local_error(true_refl, estimate_refl, mask, window_size, window_size//2)
################################## Algorithms ##################################
def retinex(image, mask, threshold, L1=False):
image = np.clip(image, 3., np.infty)
log_image = np.where(mask, np.log(image), 0.)
i_y, i_x = poisson.get_gradients(log_image)
r_y = np.where(np.abs(i_y) > threshold, i_y, 0.)
r_x = np.where(np.abs(i_x) > threshold, i_x, 0.)
if L1:
log_refl = poisson.solve_L1(r_y, r_x, mask)
else:
log_refl = poisson.solve(r_y, r_x, mask)
refl = mask * np.exp(log_refl)
return np.where(mask, image / refl, 0.), refl
def project_gray(i_y):
i_y_mean = np.mean(i_y, axis=2)
result = np.zeros(i_y.shape)
for i in range(3):
result[:,:,i] = i_y_mean
return result
def project_chromaticity(i_y):
return i_y - project_gray(i_y)
def color_retinex(image, mask, threshold_gray, threshold_color, L1=False):
image = np.clip(image, 3., np.infty)
log_image = np.log(image)
i_y_orig, i_x_orig = poisson.get_gradients(log_image)
i_y_gray, i_y_color = project_gray(i_y_orig), project_chromaticity(i_y_orig)
i_x_gray, i_x_color = project_gray(i_x_orig), project_chromaticity(i_x_orig)
image_grayscale = np.mean(image, axis=2)
image_grayscale = np.clip(image_grayscale, 3., np.infty)
log_image_grayscale = np.log(image_grayscale)
i_y, i_x = poisson.get_gradients(log_image_grayscale)
norm = np.sqrt(np.sum(i_y_color**2, axis=2))
i_y_match = (norm > threshold_color) + (np.abs(i_y_gray[:,:,0]) > threshold_gray)
norm = np.sqrt(np.sum(i_x_color**2, axis=2))
i_x_match = (norm > threshold_color) + (np.abs(i_x_gray[:,:,0]) > threshold_gray)
r_y = np.where(i_y_match, i_y, 0.)
r_x = np.where(i_x_match, i_x, 0.)
if L1:
log_refl = poisson.solve_L1(r_y, r_x, mask)
else:
log_refl = poisson.solve(r_y, r_x, mask)
refl = np.exp(log_refl)
return image_grayscale / refl, refl
def weiss(image, multi_images, mask, L1=False):
multi_images = np.clip(multi_images, 3., np.infty)
log_multi_images = np.log(multi_images)
i_y_all, i_x_all = poisson.get_gradients(log_multi_images)
r_y = np.median(i_y_all, axis=2)
r_x = np.median(i_x_all, axis=2)
if L1:
log_refl = poisson.solve_L1(r_y, r_x, mask)
else:
log_refl = poisson.solve(r_y, r_x, mask)
refl = np.where(mask, np.exp(log_refl), 0.)
shading = np.where(mask, image / refl, 0.)
return shading, refl
def weiss_retinex(image, multi_images, mask, threshold, L1=False):
multi_images = np.clip(multi_images, 3., np.infty)
log_multi_images = np.log(multi_images)
i_y_all, i_x_all = poisson.get_gradients(log_multi_images)
r_y = np.median(i_y_all, axis=2)
r_x = np.median(i_x_all, axis=2)
r_y *= (np.abs(r_y) > threshold)
r_x *= (np.abs(r_x) > threshold)
if L1:
log_refl = poisson.solve_L1(r_y, r_x, mask)
else:
log_refl = poisson.solve(r_y, r_x, mask)
refl = np.where(mask, np.exp(log_refl), 0.)
shading = np.where(mask, image / refl, 0.)
return shading, refl
#################### Wrapper classes for experiments ###########################
class BaselineEstimator:
"""Assume every image is entirely shading or entirely reflectance."""
def __init__(self, mode, L1=False):
assert mode in ['refl', 'shading']
self.mode = mode
def estimate_shading_refl(self, image, mask, L1=False):
if self.mode == 'refl':
refl = image
shading = 1. * mask
else:
refl = 1. * mask
shading = image
return shading, refl
@staticmethod
def get_input(tag):
image = load_object(tag, 'diffuse')
image = np.mean(image, axis=2)
mask = load_object(tag, 'mask')
return image, mask
@staticmethod
def param_choices():
return [{'mode': m} for m in ['shading', 'refl']]
class GrayscaleRetinexEstimator:
def __init__(self, threshold):
self.threshold = threshold
def estimate_shading_refl(self, image, mask, L1=False):
return retinex(image, mask, self.threshold, L1)
@staticmethod
def get_input(tag):
image = load_object(tag, 'diffuse')
image = np.mean(image, axis=2)
mask = load_object(tag, 'mask')
return image, mask
@staticmethod
def param_choices():
return [{'threshold': t} for t in np.logspace(-3., 1., 15)]
class ColorRetinexEstimator:
def __init__(self, threshold_gray, threshold_color, L1=False):
self.threshold_gray = threshold_gray
self.threshold_color = threshold_color
def estimate_shading_refl(self, image, mask, L1=False):
return color_retinex(image, mask, self.threshold_gray, self.threshold_color, L1)
@staticmethod
def get_input(tag):
image = load_object(tag, 'diffuse')
mask = load_object(tag, 'mask')
return image, mask
@staticmethod
def param_choices():
return [{'threshold_gray': tg, 'threshold_color': tc}
for tg in np.logspace(-1.5, 0., 5)
for tc in np.logspace(-1.5, 0., 5)]
class WeissEstimator:
def estimate_shading_refl(self, image, multi_images, mask, L1=False):
return weiss(image, multi_images, mask, L1)
@staticmethod
def get_input(tag):
image = load_object(tag, 'diffuse')
image = np.mean(image, axis=2)
mask = load_object(tag, 'mask')
multi_images = load_multiple(tag)
multi_images = np.mean(multi_images, axis=2)
return image, multi_images, mask
@staticmethod
def param_choices():
return [{}]
class WeissRetinexEstimator:
def __init__(self, threshold=0.1, L1=False):
self.threshold = threshold
def estimate_shading_refl(self, image, multi_images, mask, L1=False):
return weiss_retinex(image, multi_images, mask, self.threshold, L1)
@staticmethod
def get_input(tag):
image = load_object(tag, 'diffuse')
image = np.mean(image, axis=2)
mask = load_object(tag, 'mask')
multi_images = load_multiple(tag)
multi_images = np.mean(multi_images, axis=2)
return image, multi_images, mask
@staticmethod
def param_choices():
return [{'threshold': t} for t in np.logspace(-3., 1., 15)]
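# Illustrative driver (a sketch; the experiment script that loops over objects and
# param_choices() is assumed to live elsewhere):
#
#   estimator = GrayscaleRetinexEstimator(threshold=1.0)
#   image, mask = GrayscaleRetinexEstimator.get_input(tag)
#   shading, refl = estimator.estimate_shading_refl(image, mask)
#   err = score_image(true_shading, true_refl, shading, refl, mask)
#
# where tag names a directory under data/ and true_shading / true_refl are the
# ground-truth decompositions loaded via load_object().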
| mit | -6,676,248,594,824,166,000 | 32.18239 | 99 | 0.60453 | false |
sigurdga/samklang-blog | samklang_blog/views.py | 1 | 2247 | from django.http import HttpResponseRedirect
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic.dates import ArchiveIndexView, YearArchiveView, MonthArchiveView, DateDetailView
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.sites.models import Site
from samklang_blog.models import Entry
from samklang_blog.forms import EntryForm
from datetime import datetime
MONTH_FORMAT = '%m'
class EntryCreateView(CreateView):
model = Entry
form_class = EntryForm
initial = {'pub_date': datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
month_format = MONTH_FORMAT
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.user = self.request.user
if hasattr(self.request, 'site'):
self.object.site = self.request.site
else:
self.object.site = Site.objects.get(pk=1)
self.object.save()
return HttpResponseRedirect(self.object.get_absolute_url())
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EntryCreateView, self).dispatch(*args, **kwargs)
class EntryUpdateView(UpdateView):
model = Entry
form_class = EntryForm
month_format = MONTH_FORMAT
#def form_valid(self, form):
# self.object = form.save()
# return HttpResponseRedirect(self.object.get_absolute_url())
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(EntryUpdateView, self).dispatch(*args, **kwargs)
class EntryArchiveIndexView(ArchiveIndexView):
model = Entry
date_field = 'pub_date'
month_format = MONTH_FORMAT
allow_empty = True
def get_queryset(self):
return Entry.live.all()
class EntryYearArchiveView(YearArchiveView):
model = Entry
date_field = 'pub_date'
month_format = MONTH_FORMAT
allow_empty = True
class EntryMonthArchiveView(MonthArchiveView):
model = Entry
date_field = 'pub_date'
month_format = MONTH_FORMAT
allow_empty = True
class EntryDateDetailView(DateDetailView):
model = Entry
date_field = 'pub_date'
month_format = MONTH_FORMAT
| agpl-3.0 | -2,393,359,891,051,174,000 | 29.780822 | 106 | 0.70227 | false |
czpython/django-cms | cms/page_rendering.py | 1 | 2938 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import resolve, Resolver404, reverse
from django.http import Http404
from django.shortcuts import render
from django.template.response import TemplateResponse
from cms import __version__
from cms.cache.page import set_page_cache
from cms.models import Page
from cms.utils.conf import get_cms_setting
from cms.utils.page import get_page_template_from_request
from cms.utils.page_permissions import user_can_change_page, user_can_view_page
def render_page(request, page, current_language, slug):
"""
Renders a page
"""
context = {}
context['lang'] = current_language
context['current_page'] = page
context['has_change_permissions'] = user_can_change_page(request.user, page)
context['has_view_permissions'] = user_can_view_page(request.user, page)
if not context['has_view_permissions']:
return _handle_no_page(request)
template = get_page_template_from_request(request)
response = TemplateResponse(request, template, context)
response.add_post_render_callback(set_page_cache)
# Add headers for X Frame Options - this really should be changed upon moving to class based views
xframe_options = page.get_xframe_options()
# xframe_options can be None if there's no xframe information on the page
# (eg. a top-level page which has xframe options set to "inherit")
if xframe_options == Page.X_FRAME_OPTIONS_INHERIT or xframe_options is None:
# This is when we defer to django's own clickjacking handling
return response
# We want to prevent django setting this in their middlewear
response.xframe_options_exempt = True
if xframe_options == Page.X_FRAME_OPTIONS_ALLOW:
# Do nothing, allowed is no header.
return response
elif xframe_options == Page.X_FRAME_OPTIONS_SAMEORIGIN:
response['X-Frame-Options'] = 'SAMEORIGIN'
elif xframe_options == Page.X_FRAME_OPTIONS_DENY:
response['X-Frame-Options'] = 'DENY'
return response
def render_object_structure(request, obj):
context = {
'object': obj,
'cms_toolbar': request.toolbar,
}
return render(request, 'cms/toolbar/structure.html', context)
def _handle_no_page(request):
try:
#add a $ to the end of the url (does not match on the cms anymore)
resolve('%s$' % request.path)
except Resolver404 as e:
# raise a django http 404 page
exc = Http404(dict(path=request.path, tried=e.args[0]['tried']))
raise exc
raise Http404('CMS Page not found: %s' % request.path)
def _render_welcome_page(request):
context = {
'cms_version': __version__,
'cms_edit_on': get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'),
'django_debug': settings.DEBUG,
'next_url': reverse('pages-root'),
}
return TemplateResponse(request, "cms/welcome.html", context)
| bsd-3-clause | 3,639,318,368,674,407,400 | 35.725 | 102 | 0.687543 | false |
rivasd/djPsych | djreceive/migrations/0019_singleaudiotrial.py | 1 | 1328 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-04 19:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('djreceive', '0018_auto_20170104_1418'),
]
operations = [
migrations.CreateModel(
name='SingleAudioTrial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('internal_node_id', models.CharField(max_length=24)),
('trial_index', models.IntegerField()),
('trial_type', models.CharField(max_length=32)),
('time_elapsed', models.IntegerField()),
('timeout', models.BooleanField(default=False)),
('extra_data', jsonfield.fields.JSONField(blank=True, null=True)),
('stimulus', models.CharField(max_length=128)),
('key_press', models.IntegerField()),
('rt', models.IntegerField()),
('run', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='djreceive.Run')),
],
options={
'abstract': False,
},
),
]
| gpl-3.0 | -2,971,340,042,235,738,600 | 35.888889 | 114 | 0.565512 | false |
lrocheWB/navitia | source/jormungandr/jormungandr/scenarios/helpers.py | 1 | 8430 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from navitiacommon import response_pb2
from operator import attrgetter
def has_walking_first(journey):
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
return True
elif section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Walking:
return False
elif section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Walking:
return False
return True
def has_bike_first(journey):
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
return True
elif section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Bike:
return False
elif section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Bike:
return False
return True
def has_bss_first(journey):
has_bss = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
return False
elif section.type == response_pb2.BSS_RENT:
return True
return False
def has_walking_last(journey):
has_pt = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
has_pt = True
elif has_pt \
and section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Walking:
return False
elif has_pt \
and section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Walking:
return False
    return has_pt  # we will not be here if there is another fallback mode used after the pt section
def has_bike_last(journey):
has_pt = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
has_pt = True
elif has_pt \
and section.type == response_pb2.CROW_FLY \
and section.street_network.mode != response_pb2.Bike:
return False
elif has_pt \
and section.type == response_pb2.STREET_NETWORK \
and section.street_network.mode != response_pb2.Bike:
return False
    return has_pt  # we will not be here if there is another fallback mode used after the pt section
def has_bss_last(journey):
has_pt = False
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
has_pt = True
elif has_pt and section.type == response_pb2.BSS_RENT:
return True
return False
def has_bss_first_and_walking_last(journey):
return has_bss_first(journey) and has_walking_last(journey)
def has_walking_first_and_bss_last(journey):
return has_walking_first(journey) and has_bss_last(journey)
def has_bss_first_and_bss_last(journey):
return has_bss_first(journey) and has_bss_last(journey)
def has_bike_first_and_walking_last(journey):
return has_bike_first(journey) and has_walking_last(journey)
def has_bike_first_and_bss_last(journey):
return has_bike_first(journey) and has_bss_last(journey)
def bike_duration(journey):
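    """Total duration of bike sections ridden outside a BSS rental (bike-share riding is excluded)."""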
duration = 0
in_bss = False
for section in journey.sections:
if section.type == response_pb2.BSS_RENT:
in_bss = True
if section.type == response_pb2.BSS_PUT_BACK:
in_bss = False
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Bike \
and not in_bss:
duration = duration + section.duration
return duration
def bss_duration(journey):
duration = 0
in_bss = False
for section in journey.sections:
if section.type == response_pb2.BSS_RENT:
in_bss = True
duration += section.duration
if section.type == response_pb2.BSS_PUT_BACK:
in_bss = False
duration += section.duration
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Bike \
and in_bss:
duration = duration + section.duration
return duration
def car_duration(journey):
duration = 0
for section in journey.sections:
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Car:
duration = duration + section.duration
return duration
def walking_duration(journey):
duration = 0
for section in journey.sections:
if section.type in (response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
and section.street_network.mode == response_pb2.Walking:
duration = duration + section.duration
return duration
def pt_duration(journey):
duration = 0
for section in journey.sections:
if section.type == response_pb2.PUBLIC_TRANSPORT:
duration = duration + section.duration
return duration
def is_non_pt_bss(journey):
return journey.type == 'non_pt_bss'
def is_non_pt_walk(journey):
return journey.type == 'non_pt_walk'
def is_non_pt_bike(journey):
return journey.type == 'non_pt_bike'
max_duration_fallback_modes = {'walking': [response_pb2.Walking],
'bss': [response_pb2.Walking, response_pb2.Bss],
'bike': [response_pb2.Walking, response_pb2.Bss, response_pb2.Bike],
'car': [response_pb2.Walking, response_pb2.Bss, response_pb2.Bike, response_pb2.Car],
}
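# Each mode also accepts the lighter fallback modes: a 'bike' journey may still contain walking and bss sections.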
def filter_journeys_by_fallback_modes(journeys, fallback_modes):
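    """Keep only journeys whose street-network/crow-fly sections use the allowed fallback modes and whose duration is non-zero."""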
section_is_fallback_or_pt = lambda section: section.type not in \
(response_pb2.STREET_NETWORK, response_pb2.CROW_FLY) \
or section.street_network.mode in fallback_modes
filter_journey = lambda journey: all(section_is_fallback_or_pt(section) for section in journey.sections) \
and journey.duration > 0
return filter(filter_journey, journeys)
def select_best_journey_by_time(journeys, clockwise, fallback_modes):
list_journeys = filter_journeys_by_fallback_modes(journeys, fallback_modes)
if not list_journeys:
return None
if clockwise:
return min(list_journeys, key=attrgetter('arrival_date_time'))
else:
return max(list_journeys, key=attrgetter('departure_date_time'))
def select_best_journey_by_duration(journeys, clockwise, fallback_modes):
list_journeys = filter_journeys_by_fallback_modes(journeys, fallback_modes)
if not list_journeys:
return None
return min(list_journeys, key=attrgetter('duration'))
fallback_mode_order = ['walking', 'bss', 'bike', 'car']
def fallback_mode_comparator(a, b):
return fallback_mode_order.index(a) - fallback_mode_order.index(b)
| agpl-3.0 | -456,811,872,450,253,060 | 37.318182 | 116 | 0.655753 | false |
ruhan/django-silk-mongoengine | setup.py | 1 | 1322 | import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme_file:
README = readme_file.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-silk',
version='0.5.2',
packages=['silk'],
include_package_data=True,
license='MIT License',
description='Silky smooth profiling for the Django Framework',
long_description=README,
url='http://www.mtford.co.uk/projects/silk/',
author='Michael Ford',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
    install_requires=[
'Django',
'Pygments',
'six',
'simplejson',
'python-dateutil',
'requests',
'sqlparse',
'Jinja2',
'autopep8',
'pytz'
]
)
| mit | -4,198,657,876,189,128,000 | 28.377778 | 80 | 0.587746 | false |
UstadMobile/exelearning-ustadmobile-work | testing/runtests.py | 1 | 3225 | #!/usr/bin/python
# ===========================================================================
# config unittest
# Copyright 2004, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
import sys
import os
sys.path.insert(0, '.')
import unittest
from testconfig import TestConfig
from testchecker import TestChecker
from testconfigparser import TestConfigParser, TestSections
from testnode import TestNode
from testuniqueid import TestUniqueId
##from testxmlhttp import TestOutline
from testpackage import TestPackage
#from testblock import TestBlock
from testidevice import TestIdevice
#from testidevicestore import TestIdeviceStore
#from testpersist import TestPersist
from testexport import TestWebsiteExport
##from testexport import TestScormMetaExport
##from testexport import TestScormNoMetaExport
#from testresource import TestResource
#from testforumscache import TestForumsCache
from testresources import TestResources
from testblockfactory import TestBlockFactory
from testustadmobileexport import TestUstadMobileExport
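# Running this module executes the whole suite; the process exits 0 on success and 1 on failure.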
# ===========================================================================
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestConfig))
suite.addTest(unittest.makeSuite(TestChecker))
suite.addTest(unittest.makeSuite(TestConfigParser))
suite.addTest(unittest.makeSuite(TestSections))
suite.addTest(unittest.makeSuite(TestNode))
suite.addTest(unittest.makeSuite(TestUniqueId))
## suite.addTest(unittest.makeSuite(TestOutline))
suite.addTest(unittest.makeSuite(TestPackage))
# suite.addTest(unittest.makeSuite(TestBlock))
suite.addTest(TestBlockFactory())
suite.addTest(unittest.makeSuite(TestIdevice))
# suite.addTest(unittest.makeSuite(TestIdeviceStore))
# suite.addTest(unittest.makeSuite(TestPersist))
suite.addTest(unittest.makeSuite(TestWebsiteExport))
suite.addTest(unittest.makeSuite(TestUstadMobileExport))
## suite.addTest(unittest.makeSuite(TestScormMetaExport))
## suite.addTest(unittest.makeSuite(TestScormNoMetaExport))
# suite.addTest(unittest.makeSuite(TestResource))
# #suite.addTest(unittest.makeSuite(TestForumsCache))
suite.addTest(unittest.makeSuite(TestResources))
result = unittest.TextTestRunner(verbosity=2).run(suite)
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
| gpl-2.0 | 2,101,990,094,750,326,500 | 41.434211 | 77 | 0.710388 | false |
HaraldWeber/client | src/ladder/__init__.py | 1 | 1144 | from PyQt4 import QtCore
from PyQt4 import QtWebKit
import logging
import urllib
import util
logger = logging.getLogger(__name__)
class Ladder(QtCore.QObject):
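    """Ladder tab backed by a QWebView; the leaderboard page is loaded lazily the first time the tab is shown."""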
def __init__(self, client, *args, **kwargs):
QtCore.QObject.__init__(self, *args, **kwargs)
logger.debug("Ladder tab instantiating.")
self.client = client
self.ui = QtWebKit.QWebView()
self.client.ladderTab.layout().addWidget(self.ui)
self.loaded = False
self.client.showLadder.connect(self.reloadView)
self.ui.loadFinished.connect(self.ui.show)
@QtCore.pyqtSlot()
def reloadView(self):
        if self.loaded:
return
self.loaded = True
self.ui.setVisible(False)
        # If a local theme CSS exists, skin the WebView with it
if util.themeurl("ladder/style.css"):
self.ui.settings().setUserStyleSheetUrl(util.themeurl("ladder/style.css"))
self.ui.setUrl(QtCore.QUrl("http://faforever.com/faf/leaderboards/read-leader.php?board=global&username=%s" % (self.client.login)))
| gpl-3.0 | -2,618,006,111,668,638,000 | 27.6 | 139 | 0.615385 | false |
anshengme/Angelina | apps/users/views.py | 1 | 15715 | import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.hashers import make_password
from django.core.exceptions import ObjectDoesNotExist  # raised when an ORM .get() finds no row
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import render, HttpResponseRedirect, HttpResponse
from django.views.generic.base import View
from pure_pagination import Paginator, PageNotAnInteger
from courses.models import Course
from operation.models import UserCourse, UserFavorite, UserMessage
from organization.models import CourseOrg, Teacher
from utils.email_send import send_register_email
from utils.mixin_utils import LoginRequiredMixin
from .forms import LoginForm, RegisterForm, ForgetForm, ModifyPwdForm, UploadImageForm, UserInfoForm
from .models import UserProfile, EmailVerifyRecord, Banner
class CustomBackend(ModelBackend):
"""自定义auth验证,可以通过用户名邮箱登录"""
def authenticate(self, username=None, password=None, **kwargs):
try:
user = UserProfile.objects.get(Q(username=username) | Q(email=username)) # 通过用户名或邮箱获取用户是否存在
if user.check_password(password): # 如果用户密码正确返回user对象
return user
else: # 出错或者用户密码错误就返回None
return None
except Exception as e:
return None
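# Note: this backend only takes effect when it is listed in settings.AUTHENTICATION_BACKENDS.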
__all__ = [
'IndexView',
'LoginView',
'LogoutView',
'ActiveUserView',
'RegisterView',
'ForgetPwdView',
'ResetView',
'ModifyPwdView',
'UserInfoView',
'UploadImageView',
'UpdatePwdView',
'SendEmailCodeView',
'UpdateEmailView',
'MyCourseView',
'MyFavOrgVIew',
'MyFavTeacherVIew',
'MyFavCourseVIew',
'MyMessageVIew'
]
# Create your views here.
class IndexView(View):
"""首页"""
def get(self, request):
all_banner = Banner.objects.all().order_by('index') # 轮播图
courses = Course.objects.filter(is_banner=False)[:6] # 课程
banner_course = Course.objects.filter(is_banner=True)[:3] # 轮播图课程
course_orgs = CourseOrg.objects.all()[:15] # 课程机构
return render(request, 'index.html', {
'all_banner': all_banner,
'courses': courses,
'banner_course': banner_course,
'course_orgs': course_orgs
})
class LoginView(View):
def get(self, request):
"""返回登录页面"""
return render(request, 'login.html', {})
def post(self, request):
"""验证用户是否是否可以成功登录"""
login_form = LoginForm(request.POST) # FORM验证传过来的值是否合法
if login_form.is_valid(): # 验证是否错误
user_name = request.POST.get('username', '') # 获取用户名
pass_word = request.POST.get('password', '') # 获取密码
user = authenticate(username=user_name, password=pass_word) # 验证用户名和密码
if user is not None: # 如果用户名和密码匹配
if user.is_active: # 如果用户是激活状态
login(request, user) # 把SESSION和COOKIE写入request
return HttpResponseRedirect(reverse('index')) # 返回首页
else: # 用户未激活
return render(request, 'login.html', {'msg': '用户尚未激活!'})
else: # 用户名和密码错误
return render(request, 'login.html', {'msg': '用户名或密码错误!'})
else: # FORM验证出错,并吧出错信息传递到前端
return render(request, 'login.html', {'login_form': login_form})
class LogoutView(View):
def get(self, request):
logout(request)
return HttpResponseRedirect(reverse('index'))
class RegisterView(View):
"""用户注册"""
def get(self, request):
register_form = RegisterForm() # 获取验证码
return render(request, 'register.html', {'register_form': register_form})
def post(self, request):
register_form = RegisterForm(request.POST) # FORM验证
if register_form.is_valid(): # 验证是否错误
user_name = request.POST.get('email', '') # 获取用户注册的邮箱
try:
UserProfile.objects.get(email=user_name) # 如果用户名已存在
return render(request, 'register.html', {'msg': '用户已存在!', 'register_form': register_form})
except ObjectDoesNotExist as e:
pass_word = request.POST.get('password', '') # 获取密码
# 保存用户信息
user_profile = UserProfile()
user_profile.username = user_name
user_profile.email = user_name
user_profile.password = make_password(pass_word) # 密码使用make_password加密之后保存
user_profile.is_active = False # 用户默认未激活
user_profile.save()
# 写入欢迎注册消息
user_message = UserMessage()
user_message.user = user_profile.id
user_message.message = "欢迎注册慕学在线网"
user_message.save()
send_register_email(email=user_name, send_type='register') # 发送用户注册邮件
return HttpResponseRedirect(reverse('login')) # 跳转到登录页面
else:
return render(request, 'register.html', {'register_form': register_form})
class ActiveUserView(View):
"""用户激活"""
def get(self, request, active_code):
"""
:param active_code: 激活的字符串
"""
try:
all_records = EmailVerifyRecord.objects.get(code=active_code) # 获取到这个CODE
except Exception as e:
# 如果没有这个code存在则返回一个错误页面
return render(request, 'active_fail.html')
if all_records:
email = all_records.email # 获取用户邮箱
user = UserProfile.objects.get(email=email) # 获取这个用户
user.is_active = True # 把用户状态改为激活
user.save() # 保存
all_records.delete() # 删除激活码
else:
# 验证码不存在
return render(request, 'active_fail.html')
return HttpResponseRedirect(reverse('login')) # 激活之后跳转到登录页面
class ForgetPwdView(View):
"""密码重置"""
def get(self, request):
forget_form = ForgetForm() # 获取重置密码Form
return render(request, 'forgetpwd.html', {'forget_form': forget_form})
def post(self, request):
forget_form = ForgetForm(request.POST)
if forget_form.is_valid(): # Form验证成功
email = request.POST.get('email', '') # 获取用户邮箱
send_register_email(email=email, send_type='forget') # 发送密码重置链接
return render(request, 'send_success.html')
else:
return render(request, 'forgetpwd.html', {'forget_form': forget_form})
class ResetView(View):
"""修改密码"""
def get(self, request, reset_code):
try:
all_records = EmailVerifyRecord.objects.get(code=reset_code) # 取出验证码
except Exception as e:
return render(request, 'active_fail.html')
if all_records:
email = all_records.email # 获取email
all_records.delete() # 删除验证码
return render(request, 'password_reset.html', {'email': email})
else:
return render(request, 'active_fail.html')
class ModifyPwdView(View):
"""修改用户密码"""
def post(self, request):
modify_form = ModifyPwdForm(request.POST) # 验证参数
if modify_form.is_valid(): # 验证是否铸错
# 取出用户的用户名和密码
pwd1 = request.POST.get('password1', '')
pwd2 = request.POST.get('password2', '')
email = request.POST.get('email', '')
if pwd1 != pwd2: # 两个密码是否一致
return render(request, 'password_reset.html',
{'email': email, 'msg': '密码不一致!'})
user = UserProfile.objects.get(email=email) # 获取用户
user.password = make_password(pwd2) # 修改密码
user.save() # 保存到数据库
return HttpResponseRedirect(reverse('login')) # 跳转到登录页面
else:
email = request.POST.get('email', None)
return render(request, 'password_reset.html',
{'email': email, 'modify_form': modify_form})
class UserInfoView(LoginRequiredMixin, View):
"""用户个人信息"""
def get(self, request):
return render(request, 'usercenter-info.html')
def post(self, request):
user_info_form = UserInfoForm(request.POST, instance=request.user) # 通过Form保存用户信息
if user_info_form.is_valid(): # 是否报错
user_info_form.save() # 保存到数据库
return HttpResponse('{"status":"success"}', content_type='application/json')
else:
return HttpResponse(json.dumps(user_info_form.errors), content_type='application/json')
class UploadImageView(LoginRequiredMixin, View):
"""用户头像上传"""
def post(self, request):
# 文件类型需要传递两个参数,第三个参数返回一个UserProfile对象
image_form = UploadImageForm(request.POST, request.FILES, instance=request.user)
if image_form.is_valid(): # 是否验证通过
request.user.save() # 保存到数据库
return HttpResponse('{"status":"success"}', content_type='application/json')
return HttpResponse('{"status":"fail"}', content_type='application/json')
class UpdatePwdView(View):
"""个人中心修改用户密码"""
def post(self, request):
modify_form = ModifyPwdForm(request.POST) # 验证密码
if modify_form.is_valid(): # 是否验证成功
pwd1 = request.POST.get('password1', '')
pwd2 = request.POST.get('password2', '')
if pwd1 != pwd2: # 密码是否一致
return HttpResponse('{"status":"fail","msg":"密码不一致"}', content_type='application/json')
user = request.user # 获取用户
user.password = make_password(pwd2) # 更新密码
user.save() # 保存到数据库
return HttpResponse('{"status":"success","msg":"密码修改成功"}', content_type='application/json')
else:
return HttpResponse(json.dumps(modify_form.errors), content_type='application/json')
class SendEmailCodeView(LoginRequiredMixin, View):
"""发送邮箱验证码"""
def get(self, request):
email = request.GET.get('email', '') # 获取邮箱
if UserProfile.objects.filter(email=email): # 邮箱是否存在
return HttpResponse('"email":"邮箱已经存在"}', content_type='application/json')
send_register_email(email, 'update_email') # 发送邮件
return HttpResponse('{"status":"success"}', content_type='application/json')
class UpdateEmailView(LoginRequiredMixin, View):
"""修改个人邮箱"""
def post(self, request):
email = request.POST.get('email', '') # 获取邮箱
code = request.POST.get('code', '') # 获取验证码
existed_records = EmailVerifyRecord.objects.filter(email=email, code=code,
send_type='update_email') # 邮箱是否能匹配到验证码
if existed_records: # 如果有
user = request.user # 获取的用户
user.email = email # 更改邮箱
user.save() # 保存到数据库
return HttpResponse('{"status":"success"}', content_type='application/json')
else:
return HttpResponse('"email":"验证码出错"}', content_type='application/json')
class MyCourseView(LoginRequiredMixin, View):
"""我学习的课程"""
def get(self, request):
user_courses = UserCourse.objects.filter(user=request.user) # 获取用户的所有课程
return render(request, 'usercenter-mycourse.html', {
'user_courses': user_courses
})
class MyFavOrgVIew(LoginRequiredMixin, View):
"""我收藏的课程机构"""
def get(self, request):
org_list = [] # 机构列表
fav_orgs = UserFavorite.objects.filter(user=request.user, fav_type=2) # 当前用户收藏的课程机构
for fav_org in fav_orgs:
org_id = fav_org.fav_id # 获取机构ID
org = CourseOrg.objects.get(id=org_id) # 获取指定的机构
org_list.append(org) # 把机构添加到列表中
return render(request, 'usercenter-fav-org.html', {
'org_list': org_list
})
class MyFavTeacherVIew(LoginRequiredMixin, View):
"""我收藏的机构讲师"""
def get(self, request):
teacher_list = []
fav_teacher = UserFavorite.objects.filter(user=request.user, fav_type=3)
for teacher in fav_teacher:
teacher_id = teacher.fav_id
teacher = Teacher.objects.get(id=teacher_id)
teacher_list.append(teacher)
return render(request, 'usercenter-fav-teacher.html', {
'teacher_list': teacher_list
})
class MyFavCourseVIew(LoginRequiredMixin, View):
"""我收藏的机构课程"""
def get(self, request):
course_list = []
fav_course = UserFavorite.objects.filter(user=request.user, fav_type=1)
for course in fav_course:
course_id = course.fav_id
course = Course.objects.get(id=course_id)
course_list.append(course)
return render(request, 'usercenter-fav-course.html', {
'course_list': course_list
})
class MyMessageVIew(LoginRequiredMixin, View):
def get(self, request):
        all_message = UserMessage.objects.filter(user=request.user.id)  # all messages for this user
        all_unread_message = UserMessage.objects.filter(user=request.user.id, has_read=False)  # unread messages
        for unread_message in all_unread_message:  # mark everything as read once the page is opened
            unread_message.has_read = True
            unread_message.save()
        try:
            page = request.GET.get('page', 1)
        except PageNotAnInteger:
            page = 1
        p = Paginator(all_message, 10, request=request)  # 10 messages per page
        messages = p.page(page)  # messages for the requested page
return render(request, 'usercenter-message.html', {
'messages': messages
})
def page_not_found(request):
    # global 404 handler
from django.shortcuts import render_to_response
response = render_to_response('404.html', {})
response.status_code = 404
return response
def forbidden(request):
    # global 403 handler
from django.shortcuts import render_to_response
response = render_to_response('403.html', {})
response.status_code = 403
return response
def page_error(request):
    # global 500 handler
from django.shortcuts import render_to_response
response = render_to_response('500.html', {})
response.status_code = 500
return response
| mit | -7,679,907,315,270,004,000 | 35.074359 | 108 | 0.605942 | false |
frobnitzem/slack | gen/plan.py | 1 | 1926 | # Plan a parallel copy using n workers into output shape s.
# The algorithm requires prod(s) to be a multiple of n and
# works by matching factors from n with those of s,
# with preference to the right (for R) or left (for L).
# This means as many workers as possible for the most sig. dimensions,
# each doing as many copies as possible on the least sig. ones.
#
# The output is a pair of shapes, with the same length as s:
# index_shape -- outer loops, used to decode the worker starting index
# copy_shape -- shape copied by each worker
#
# prod(index_shape) = n
# index_shape * copy_shape = s
prod = lambda x: reduce(lambda a,b: a*b, x, 1)
def divide_work(s, n, right_side=True):
sz = prod(s)
if n > sz:
raise ValueError, "Have too many workers."
if sz % n != 0:
raise ValueError, "Workers don't evenly divide number of copies."
f = factor(n) # Map (prime factors) (multiplicity)
index = [1 for i in s]
copy = [i for i in s]
pri = range(len(s))
if right_side == True:
pri = reversed(pri)
for i in pri:
for x in factors(s[i]):
try:
if f[x] > 0: # parallelize this one
copy[i] /= x # fewer copies
index[i] *= x # more workers
f[x] -= 1
except KeyError:
pass
if any(v != 0 for k,v in f.iteritems()):
raise ValueError, "Internal Error! Leftover workers (factors = %s)"%(str(f))
return index, copy
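# e.g. divide_work((4, 4, 9), 6) returns ([1, 2, 3], [4, 2, 3]):
# prod(index) == 6 workers and index[i] * copy[i] == s[i] for every dimension i.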
def factors(n):
j = 2
while j <= n/2:
if n%j == 0:
yield j
n /= j
else:
j += 1
yield n
def factor(n):
f = {}
for x in factors(n):
try:
f[x] += 1
except KeyError:
f[x] = 1
return f
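# e.g. factor(12) == {2: 2, 3: 1}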
def test():
for n in range(1, 10):
print n, [i for i in factors(n)]
    print divide_work((4,4,9), 2*3)
| gpl-3.0 | -4,937,704,351,101,991,000 | 26.913043 | 84 | 0.548806 | false |
thopiekar/Cura | cura/Settings/CuraContainerRegistry.py | 1 | 45586 | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
import os
import re
import configparser
from typing import Any, cast, Dict, Optional, List, Union
from PyQt5.QtWidgets import QMessageBox
from UM.Decorators import override
from UM.Settings.ContainerFormatError import ContainerFormatError
from UM.Settings.Interfaces import ContainerInterface
from UM.Settings.ContainerRegistry import ContainerRegistry
from UM.Settings.ContainerStack import ContainerStack
from UM.Settings.InstanceContainer import InstanceContainer
from UM.Settings.SettingInstance import SettingInstance
from UM.Application import Application
from UM.Logger import Logger
from UM.Message import Message
from UM.Platform import Platform
from UM.PluginRegistry import PluginRegistry # For getting the possible profile writers to write with.
from UM.Resources import Resources
from UM.Util import parseBool
from cura.ReaderWriters.ProfileWriter import ProfileWriter
from . import ExtruderStack
from . import GlobalStack
import cura.CuraApplication
from cura.Settings.cura_empty_instance_containers import empty_quality_container
from cura.Machines.ContainerTree import ContainerTree
from cura.ReaderWriters.ProfileReader import NoProfileException, ProfileReader
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
class CuraContainerRegistry(ContainerRegistry):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# We don't have all the machines loaded in the beginning, so in order to add the missing extruder stack
# for single extrusion machines, we subscribe to the containerAdded signal, and whenever a global stack
# is added, we check to see if an extruder stack needs to be added.
self.containerAdded.connect(self._onContainerAdded)
## Overridden from ContainerRegistry
#
# Adds a container to the registry.
#
# This will also try to convert a ContainerStack to either Extruder or
# Global stack based on metadata information.
@override(ContainerRegistry)
def addContainer(self, container: ContainerInterface) -> None:
# Note: Intentional check with type() because we want to ignore subclasses
if type(container) == ContainerStack:
container = self._convertContainerStack(cast(ContainerStack, container))
if isinstance(container, InstanceContainer) and type(container) != type(self.getEmptyInstanceContainer()):
# Check against setting version of the definition.
required_setting_version = cura.CuraApplication.CuraApplication.SettingVersion
actual_setting_version = int(container.getMetaDataEntry("setting_version", default = 0))
if required_setting_version != actual_setting_version:
Logger.log("w", "Instance container {container_id} is outdated. Its setting version is {actual_setting_version} but it should be {required_setting_version}.".format(container_id = container.getId(), actual_setting_version = actual_setting_version, required_setting_version = required_setting_version))
return # Don't add.
super().addContainer(container)
## Create a name that is not empty and unique
# \param container_type \type{string} Type of the container (machine, quality, ...)
# \param current_name \type{} Current name of the container, which may be an acceptable option
# \param new_name \type{string} Base name, which may not be unique
# \param fallback_name \type{string} Name to use when (stripped) new_name is empty
# \return \type{string} Name that is unique for the specified type and name/id
def createUniqueName(self, container_type: str, current_name: str, new_name: str, fallback_name: str) -> str:
new_name = new_name.strip()
num_check = re.compile(r"(.*?)\s*#\d+$").match(new_name)
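        # Strip an existing " #<number>" suffix so a copy of "Name #2" becomes "Name #3" instead of "Name #2 #2".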
if num_check:
new_name = num_check.group(1)
if new_name == "":
new_name = fallback_name
unique_name = new_name
i = 1
# In case we are renaming, the current name of the container is also a valid end-result
while self._containerExists(container_type, unique_name) and unique_name != current_name:
i += 1
unique_name = "%s #%d" % (new_name, i)
return unique_name
## Check if a container with of a certain type and a certain name or id exists
# Both the id and the name are checked, because they may not be the same and it is better if they are both unique
# \param container_type \type{string} Type of the container (machine, quality, ...)
# \param container_name \type{string} Name to check
def _containerExists(self, container_type: str, container_name: str):
container_class = ContainerStack if container_type == "machine" else InstanceContainer
return self.findContainersMetadata(container_type = container_class, id = container_name, type = container_type, ignore_case = True) or \
self.findContainersMetadata(container_type = container_class, name = container_name, type = container_type)
## Exports an profile to a file
#
# \param container_list \type{list} the containers to export. This is not
# necessarily in any order!
# \param file_name \type{str} the full path and filename to export to.
# \param file_type \type{str} the file type with the format "<description> (*.<extension>)"
# \return True if the export succeeded, false otherwise.
def exportQualityProfile(self, container_list: List[InstanceContainer], file_name: str, file_type: str) -> bool:
# Parse the fileType to deduce what plugin can save the file format.
# fileType has the format "<description> (*.<extension>)"
split = file_type.rfind(" (*.") # Find where the description ends and the extension starts.
if split < 0: # Not found. Invalid format.
Logger.log("e", "Invalid file format identifier %s", file_type)
return False
description = file_type[:split]
extension = file_type[split + 4:-1] # Leave out the " (*." and ")".
if not file_name.endswith("." + extension): # Auto-fill the extension if the user did not provide any.
file_name += "." + extension
# On Windows, QML FileDialog properly asks for overwrite confirm, but not on other platforms, so handle those ourself.
if not Platform.isWindows():
if os.path.exists(file_name):
result = QMessageBox.question(None, catalog.i18nc("@title:window", "File Already Exists"),
catalog.i18nc("@label Don't translate the XML tag <filename>!", "The file <filename>{0}</filename> already exists. Are you sure you want to overwrite it?").format(file_name))
if result == QMessageBox.No:
return False
profile_writer = self._findProfileWriter(extension, description)
try:
if profile_writer is None:
raise Exception("Unable to find a profile writer")
success = profile_writer.write(file_name, container_list)
except Exception as e:
Logger.log("e", "Failed to export profile to %s: %s", file_name, str(e))
m = Message(catalog.i18nc("@info:status Don't translate the XML tags <filename> or <message>!", "Failed to export profile to <filename>{0}</filename>: <message>{1}</message>", file_name, str(e)),
lifetime = 0,
title = catalog.i18nc("@info:title", "Error"))
m.show()
return False
if not success:
Logger.log("w", "Failed to export profile to %s: Writer plugin reported failure.", file_name)
m = Message(catalog.i18nc("@info:status Don't translate the XML tag <filename>!", "Failed to export profile to <filename>{0}</filename>: Writer plugin reported failure.", file_name),
lifetime = 0,
title = catalog.i18nc("@info:title", "Error"))
m.show()
return False
m = Message(catalog.i18nc("@info:status Don't translate the XML tag <filename>!", "Exported profile to <filename>{0}</filename>", file_name),
title = catalog.i18nc("@info:title", "Export succeeded"))
m.show()
return True
## Gets the plugin object matching the criteria
# \param extension
# \param description
# \return The plugin object matching the given extension and description.
def _findProfileWriter(self, extension: str, description: str) -> Optional[ProfileWriter]:
plugin_registry = PluginRegistry.getInstance()
for plugin_id, meta_data in self._getIOPlugins("profile_writer"):
for supported_type in meta_data["profile_writer"]: # All file types this plugin can supposedly write.
supported_extension = supported_type.get("extension", None)
if supported_extension == extension: # This plugin supports a file type with the same extension.
supported_description = supported_type.get("description", None)
if supported_description == description: # The description is also identical. Assume it's the same file type.
return cast(ProfileWriter, plugin_registry.getPluginObject(plugin_id))
return None
## Imports a profile from a file
#
# \param file_name The full path and filename of the profile to import.
# \return Dict with a 'status' key containing the string 'ok' or 'error',
# and a 'message' key containing a message for the user.
def importProfile(self, file_name: str) -> Dict[str, str]:
Logger.log("d", "Attempting to import profile %s", file_name)
if not file_name:
return { "status": "error", "message": catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "Failed to import profile from <filename>{0}</filename>: {1}", file_name, "Invalid path")}
global_stack = Application.getInstance().getGlobalContainerStack()
if not global_stack:
return {"status": "error", "message": catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "Can't import profile from <filename>{0}</filename> before a printer is added.", file_name)}
container_tree = ContainerTree.getInstance()
machine_extruders = []
for position in sorted(global_stack.extruders):
machine_extruders.append(global_stack.extruders[position])
plugin_registry = PluginRegistry.getInstance()
extension = file_name.split(".")[-1]
for plugin_id, meta_data in self._getIOPlugins("profile_reader"):
if meta_data["profile_reader"][0]["extension"] != extension:
continue
profile_reader = cast(ProfileReader, plugin_registry.getPluginObject(plugin_id))
try:
profile_or_list = profile_reader.read(file_name) # Try to open the file with the profile reader.
except NoProfileException:
return { "status": "ok", "message": catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "No custom profile to import in file <filename>{0}</filename>", file_name)}
except Exception as e:
# Note that this will fail quickly. That is, if any profile reader throws an exception, it will stop reading. It will only continue reading if the reader returned None.
Logger.log("e", "Failed to import profile from %s: %s while using profile reader. Got exception %s", file_name, profile_reader.getPluginId(), str(e))
return { "status": "error", "message": catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "Failed to import profile from <filename>{0}</filename>:", file_name) + "\n<message>" + str(e) + "</message>"}
if profile_or_list:
# Ensure it is always a list of profiles
if not isinstance(profile_or_list, list):
profile_or_list = [profile_or_list]
# First check if this profile is suitable for this machine
global_profile = None
extruder_profiles = []
if len(profile_or_list) == 1:
global_profile = profile_or_list[0]
else:
for profile in profile_or_list:
if not profile.getMetaDataEntry("position"):
global_profile = profile
else:
extruder_profiles.append(profile)
extruder_profiles = sorted(extruder_profiles, key = lambda x: int(x.getMetaDataEntry("position")))
profile_or_list = [global_profile] + extruder_profiles
if not global_profile:
Logger.log("e", "Incorrect profile [%s]. Could not find global profile", file_name)
return { "status": "error",
"message": catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "This profile <filename>{0}</filename> contains incorrect data, could not import it.", file_name)}
profile_definition = global_profile.getMetaDataEntry("definition")
# Make sure we have a profile_definition in the file:
if profile_definition is None:
break
machine_definitions = self.findContainers(id = profile_definition)
if not machine_definitions:
Logger.log("e", "Incorrect profile [%s]. Unknown machine type [%s]", file_name, profile_definition)
return {"status": "error",
"message": catalog.i18nc("@info:status Don't translate the XML tags <filename>!", "This profile <filename>{0}</filename> contains incorrect data, could not import it.", file_name)
}
machine_definition = machine_definitions[0]
# Get the expected machine definition.
# i.e.: We expect gcode for a UM2 Extended to be defined as normal UM2 gcode...
has_machine_quality = parseBool(machine_definition.getMetaDataEntry("has_machine_quality", "false"))
profile_definition = machine_definition.getMetaDataEntry("quality_definition", machine_definition.getId()) if has_machine_quality else "fdmprinter"
expected_machine_definition = container_tree.machines[global_stack.definition.getId()].quality_definition
# And check if the profile_definition matches either one (showing error if not):
if profile_definition != expected_machine_definition:
Logger.log("d", "Profile {file_name} is for machine {profile_definition}, but the current active machine is {expected_machine_definition}. Changing profile's definition.".format(file_name = file_name, profile_definition = profile_definition, expected_machine_definition = expected_machine_definition))
global_profile.setMetaDataEntry("definition", expected_machine_definition)
for extruder_profile in extruder_profiles:
extruder_profile.setMetaDataEntry("definition", expected_machine_definition)
quality_name = global_profile.getName()
quality_type = global_profile.getMetaDataEntry("quality_type")
name_seed = os.path.splitext(os.path.basename(file_name))[0]
new_name = self.uniqueName(name_seed)
# Ensure it is always a list of profiles
if type(profile_or_list) is not list:
profile_or_list = [profile_or_list]
# Make sure that there are also extruder stacks' quality_changes, not just one for the global stack
if len(profile_or_list) == 1:
global_profile = profile_or_list[0]
extruder_profiles = []
for idx, extruder in enumerate(global_stack.extruders.values()):
profile_id = ContainerRegistry.getInstance().uniqueName(global_stack.getId() + "_extruder_" + str(idx + 1))
profile = InstanceContainer(profile_id)
profile.setName(quality_name)
profile.setMetaDataEntry("setting_version", cura.CuraApplication.CuraApplication.SettingVersion)
profile.setMetaDataEntry("type", "quality_changes")
profile.setMetaDataEntry("definition", expected_machine_definition)
profile.setMetaDataEntry("quality_type", quality_type)
profile.setDirty(True)
if idx == 0:
# Move all per-extruder settings to the first extruder's quality_changes
for qc_setting_key in global_profile.getAllKeys():
settable_per_extruder = global_stack.getProperty(qc_setting_key, "settable_per_extruder")
if settable_per_extruder:
setting_value = global_profile.getProperty(qc_setting_key, "value")
setting_definition = global_stack.getSettingDefinition(qc_setting_key)
if setting_definition is not None:
new_instance = SettingInstance(setting_definition, profile)
new_instance.setProperty("value", setting_value)
new_instance.resetState() # Ensure that the state is not seen as a user state.
profile.addInstance(new_instance)
profile.setDirty(True)
global_profile.removeInstance(qc_setting_key, postpone_emit = True)
extruder_profiles.append(profile)
for profile in extruder_profiles:
profile_or_list.append(profile)
# Import all profiles
profile_ids_added = [] # type: List[str]
for profile_index, profile in enumerate(profile_or_list):
if profile_index == 0:
# This is assumed to be the global profile
profile_id = (cast(ContainerInterface, global_stack.getBottom()).getId() + "_" + name_seed).lower().replace(" ", "_")
elif profile_index < len(machine_extruders) + 1:
# This is assumed to be an extruder profile
extruder_id = machine_extruders[profile_index - 1].definition.getId()
extruder_position = str(profile_index - 1)
if not profile.getMetaDataEntry("position"):
profile.setMetaDataEntry("position", extruder_position)
else:
profile.setMetaDataEntry("position", extruder_position)
profile_id = (extruder_id + "_" + name_seed).lower().replace(" ", "_")
else: # More extruders in the imported file than in the machine.
continue # Delete the additional profiles.
result = self._configureProfile(profile, profile_id, new_name, expected_machine_definition)
if result is not None:
# Remove any profiles that did got added.
for profile_id in profile_ids_added:
self.removeContainer(profile_id)
return {"status": "error", "message": catalog.i18nc(
"@info:status Don't translate the XML tag <filename>!",
"Failed to import profile from <filename>{0}</filename>:",
file_name) + " " + result}
profile_ids_added.append(profile.getId())
return {"status": "ok", "message": catalog.i18nc("@info:status", "Successfully imported profile {0}", profile_or_list[0].getName())}
            # This message is thrown when the profile reader doesn't find any profile in the file
return {"status": "error", "message": catalog.i18nc("@info:status", "File {0} does not contain any valid profile.", file_name)}
# If it hasn't returned by now, none of the plugins loaded the profile successfully.
return {"status": "error", "message": catalog.i18nc("@info:status", "Profile {0} has an unknown file type or is corrupted.", file_name)}
@override(ContainerRegistry)
def load(self) -> None:
super().load()
self._registerSingleExtrusionMachinesExtruderStacks()
self._connectUpgradedExtruderStacksToMachines()
## Check if the metadata for a container is okay before adding it.
#
# This overrides the one from UM.Settings.ContainerRegistry because we
# also require that the setting_version is correct.
@override(ContainerRegistry)
def _isMetadataValid(self, metadata: Optional[Dict[str, Any]]) -> bool:
if metadata is None:
return False
if "setting_version" not in metadata:
return False
try:
if int(metadata["setting_version"]) != cura.CuraApplication.CuraApplication.SettingVersion:
return False
except ValueError: #Not parsable as int.
return False
return True
## Update an imported profile to match the current machine configuration.
#
# \param profile The profile to configure.
# \param id_seed The base ID for the profile. May be changed so it does not conflict with existing containers.
# \param new_name The new name for the profile.
#
# \return None if configuring was successful or an error message if an error occurred.
def _configureProfile(self, profile: InstanceContainer, id_seed: str, new_name: str, machine_definition_id: str) -> Optional[str]:
profile.setDirty(True) # Ensure the profiles are correctly saved
new_id = self.createUniqueName("quality_changes", "", id_seed, catalog.i18nc("@label", "Custom profile"))
profile.setMetaDataEntry("id", new_id)
profile.setName(new_name)
# Set the unique Id to the profile, so it's generating a new one even if the user imports the same profile
# It also solves an issue with importing profiles from G-Codes
profile.setMetaDataEntry("id", new_id)
profile.setMetaDataEntry("definition", machine_definition_id)
if "type" in profile.getMetaData():
profile.setMetaDataEntry("type", "quality_changes")
else:
profile.setMetaDataEntry("type", "quality_changes")
quality_type = profile.getMetaDataEntry("quality_type")
if not quality_type:
return catalog.i18nc("@info:status", "Profile is missing a quality type.")
global_stack = Application.getInstance().getGlobalContainerStack()
if global_stack is None:
return None
definition_id = ContainerTree.getInstance().machines[global_stack.definition.getId()].quality_definition
profile.setDefinition(definition_id)
# Check to make sure the imported profile actually makes sense in context of the current configuration.
# This prevents issues where importing a "draft" profile for a machine without "draft" qualities would report as
# successfully imported but then fail to show up.
quality_group_dict = ContainerTree.getInstance().getCurrentQualityGroups()
# "not_supported" profiles can be imported.
if quality_type != empty_quality_container.getMetaDataEntry("quality_type") and quality_type not in quality_group_dict:
return catalog.i18nc("@info:status", "Could not find a quality type {0} for the current configuration.", quality_type)
ContainerRegistry.getInstance().addContainer(profile)
return None
@override(ContainerRegistry)
def saveDirtyContainers(self) -> None:
# Lock file for "more" atomically loading and saving to/from config dir.
with self.lockFile():
# Save base files first
for instance in self.findDirtyContainers(container_type=InstanceContainer):
if instance.getMetaDataEntry("removed"):
continue
if instance.getId() == instance.getMetaData().get("base_file"):
self.saveContainer(instance)
for instance in self.findDirtyContainers(container_type=InstanceContainer):
if instance.getMetaDataEntry("removed"):
continue
self.saveContainer(instance)
for stack in self.findContainerStacks():
self.saveContainer(stack)
## Gets a list of profile writer plugins
# \return List of tuples of (plugin_id, meta_data).
def _getIOPlugins(self, io_type):
plugin_registry = PluginRegistry.getInstance()
active_plugin_ids = plugin_registry.getActivePlugins()
result = []
for plugin_id in active_plugin_ids:
meta_data = plugin_registry.getMetaData(plugin_id)
if io_type in meta_data:
result.append( (plugin_id, meta_data) )
return result
## Convert an "old-style" pure ContainerStack to either an Extruder or Global stack.
def _convertContainerStack(self, container: ContainerStack) -> Union[ExtruderStack.ExtruderStack, GlobalStack.GlobalStack]:
assert type(container) == ContainerStack
container_type = container.getMetaDataEntry("type")
if container_type not in ("extruder_train", "machine"):
# It is not an extruder or machine, so do nothing with the stack
return container
Logger.log("d", "Converting ContainerStack {stack} to {type}", stack = container.getId(), type = container_type)
if container_type == "extruder_train":
new_stack = ExtruderStack.ExtruderStack(container.getId())
else:
new_stack = GlobalStack.GlobalStack(container.getId())
container_contents = container.serialize()
new_stack.deserialize(container_contents)
# Delete the old configuration file so we do not get double stacks
if os.path.isfile(container.getPath()):
os.remove(container.getPath())
return new_stack
def _registerSingleExtrusionMachinesExtruderStacks(self) -> None:
machines = self.findContainerStacks(type = "machine", machine_extruder_trains = {"0": "fdmextruder"})
for machine in machines:
extruder_stacks = self.findContainerStacks(type = "extruder_train", machine = machine.getId())
if not extruder_stacks:
self.addExtruderStackForSingleExtrusionMachine(machine, "fdmextruder")
def _onContainerAdded(self, container: ContainerInterface) -> None:
# We don't have all the machines loaded in the beginning, so in order to add the missing extruder stack
# for single extrusion machines, we subscribe to the containerAdded signal, and whenever a global stack
# is added, we check to see if an extruder stack needs to be added.
if not isinstance(container, ContainerStack) or container.getMetaDataEntry("type") != "machine":
return
machine_extruder_trains = container.getMetaDataEntry("machine_extruder_trains")
if machine_extruder_trains is not None and machine_extruder_trains != {"0": "fdmextruder"}:
return
extruder_stacks = self.findContainerStacks(type = "extruder_train", machine = container.getId())
if not extruder_stacks:
self.addExtruderStackForSingleExtrusionMachine(container, "fdmextruder")
#
# new_global_quality_changes is optional. It is only used in project loading for a scenario like this:
# - override the current machine
# - create new for custom quality profile
# new_global_quality_changes is the new global quality changes container in this scenario.
# create_new_ids indicates if new unique ids must be created
#
def addExtruderStackForSingleExtrusionMachine(self, machine, extruder_id, new_global_quality_changes = None, create_new_ids = True):
new_extruder_id = extruder_id
application = cura.CuraApplication.CuraApplication.getInstance()
extruder_definitions = self.findDefinitionContainers(id = new_extruder_id)
if not extruder_definitions:
Logger.log("w", "Could not find definition containers for extruder %s", new_extruder_id)
return
extruder_definition = extruder_definitions[0]
unique_name = self.uniqueName(machine.getName() + " " + new_extruder_id) if create_new_ids else machine.getName() + " " + new_extruder_id
extruder_stack = ExtruderStack.ExtruderStack(unique_name)
extruder_stack.setName(extruder_definition.getName())
extruder_stack.setDefinition(extruder_definition)
extruder_stack.setMetaDataEntry("position", extruder_definition.getMetaDataEntry("position"))
# create a new definition_changes container for the extruder stack
definition_changes_id = self.uniqueName(extruder_stack.getId() + "_settings") if create_new_ids else extruder_stack.getId() + "_settings"
definition_changes_name = definition_changes_id
definition_changes = InstanceContainer(definition_changes_id, parent = application)
definition_changes.setName(definition_changes_name)
definition_changes.setMetaDataEntry("setting_version", application.SettingVersion)
definition_changes.setMetaDataEntry("type", "definition_changes")
definition_changes.setMetaDataEntry("definition", extruder_definition.getId())
# move definition_changes settings if exist
for setting_key in definition_changes.getAllKeys():
if machine.definition.getProperty(setting_key, "settable_per_extruder"):
setting_value = machine.definitionChanges.getProperty(setting_key, "value")
if setting_value is not None:
# move it to the extruder stack's definition_changes
setting_definition = machine.getSettingDefinition(setting_key)
new_instance = SettingInstance(setting_definition, definition_changes)
new_instance.setProperty("value", setting_value)
new_instance.resetState() # Ensure that the state is not seen as a user state.
definition_changes.addInstance(new_instance)
definition_changes.setDirty(True)
machine.definitionChanges.removeInstance(setting_key, postpone_emit = True)
self.addContainer(definition_changes)
extruder_stack.setDefinitionChanges(definition_changes)
# create empty user changes container otherwise
user_container_id = self.uniqueName(extruder_stack.getId() + "_user") if create_new_ids else extruder_stack.getId() + "_user"
user_container_name = user_container_id
user_container = InstanceContainer(user_container_id, parent = application)
user_container.setName(user_container_name)
user_container.setMetaDataEntry("type", "user")
user_container.setMetaDataEntry("machine", machine.getId())
user_container.setMetaDataEntry("setting_version", application.SettingVersion)
user_container.setDefinition(machine.definition.getId())
user_container.setMetaDataEntry("position", extruder_stack.getMetaDataEntry("position"))
if machine.userChanges:
# For the newly created extruder stack, we need to move all "per-extruder" settings to the user changes
# container to the extruder stack.
for user_setting_key in machine.userChanges.getAllKeys():
settable_per_extruder = machine.getProperty(user_setting_key, "settable_per_extruder")
if settable_per_extruder:
setting_value = machine.getProperty(user_setting_key, "value")
setting_definition = machine.getSettingDefinition(user_setting_key)
new_instance = SettingInstance(setting_definition, definition_changes)
new_instance.setProperty("value", setting_value)
new_instance.resetState() # Ensure that the state is not seen as a user state.
user_container.addInstance(new_instance)
user_container.setDirty(True)
machine.userChanges.removeInstance(user_setting_key, postpone_emit = True)
self.addContainer(user_container)
extruder_stack.setUserChanges(user_container)
empty_variant = application.empty_variant_container
empty_material = application.empty_material_container
empty_quality = application.empty_quality_container
if machine.variant.getId() not in ("empty", "empty_variant"):
variant = machine.variant
else:
variant = empty_variant
extruder_stack.variant = variant
if machine.material.getId() not in ("empty", "empty_material"):
material = machine.material
else:
material = empty_material
extruder_stack.material = material
if machine.quality.getId() not in ("empty", "empty_quality"):
quality = machine.quality
else:
quality = empty_quality
extruder_stack.quality = quality
machine_quality_changes = machine.qualityChanges
if new_global_quality_changes is not None:
machine_quality_changes = new_global_quality_changes
if machine_quality_changes.getId() not in ("empty", "empty_quality_changes"):
extruder_quality_changes_container = self.findInstanceContainers(name = machine_quality_changes.getName(), extruder = extruder_id)
if extruder_quality_changes_container:
extruder_quality_changes_container = extruder_quality_changes_container[0]
quality_changes_id = extruder_quality_changes_container.getId()
extruder_stack.qualityChanges = self.findInstanceContainers(id = quality_changes_id)[0]
else:
# Some extruder quality_changes containers can be created at runtime as files in the qualities
# folder. Those files won't be loaded in the registry immediately. So we also need to search
# the folder to see if the quality_changes exists.
extruder_quality_changes_container = self._findQualityChangesContainerInCuraFolder(machine_quality_changes.getName())
if extruder_quality_changes_container:
quality_changes_id = extruder_quality_changes_container.getId()
extruder_quality_changes_container.setMetaDataEntry("position", extruder_definition.getMetaDataEntry("position"))
extruder_stack.qualityChanges = self.findInstanceContainers(id = quality_changes_id)[0]
else:
# If we still cannot find a quality changes container for the extruder, create a new one
container_name = machine_quality_changes.getName()
container_id = self.uniqueName(extruder_stack.getId() + "_qc_" + container_name)
extruder_quality_changes_container = InstanceContainer(container_id, parent = application)
extruder_quality_changes_container.setName(container_name)
extruder_quality_changes_container.setMetaDataEntry("type", "quality_changes")
extruder_quality_changes_container.setMetaDataEntry("setting_version", application.SettingVersion)
extruder_quality_changes_container.setMetaDataEntry("position", extruder_definition.getMetaDataEntry("position"))
extruder_quality_changes_container.setMetaDataEntry("quality_type", machine_quality_changes.getMetaDataEntry("quality_type"))
extruder_quality_changes_container.setMetaDataEntry("intent_category", "default") # Intent categories weren't a thing back then.
extruder_quality_changes_container.setDefinition(machine_quality_changes.getDefinition().getId())
self.addContainer(extruder_quality_changes_container)
extruder_stack.qualityChanges = extruder_quality_changes_container
if not extruder_quality_changes_container:
Logger.log("w", "Could not find quality_changes named [%s] for extruder [%s]",
machine_quality_changes.getName(), extruder_stack.getId())
else:
# Move all per-extruder settings to the extruder's quality changes
for qc_setting_key in machine_quality_changes.getAllKeys():
settable_per_extruder = machine.getProperty(qc_setting_key, "settable_per_extruder")
if settable_per_extruder:
setting_value = machine_quality_changes.getProperty(qc_setting_key, "value")
setting_definition = machine.getSettingDefinition(qc_setting_key)
new_instance = SettingInstance(setting_definition, definition_changes)
new_instance.setProperty("value", setting_value)
new_instance.resetState() # Ensure that the state is not seen as a user state.
extruder_quality_changes_container.addInstance(new_instance)
extruder_quality_changes_container.setDirty(True)
machine_quality_changes.removeInstance(qc_setting_key, postpone_emit=True)
else:
extruder_stack.qualityChanges = self.findInstanceContainers(id = "empty_quality_changes")[0]
self.addContainer(extruder_stack)
# Also need to fix the other qualities that are suitable for this machine. Those quality changes may still have
# per-extruder settings in the container for the machine instead of the extruder.
if machine_quality_changes.getId() not in ("empty", "empty_quality_changes"):
quality_changes_machine_definition_id = machine_quality_changes.getDefinition().getId()
else:
whole_machine_definition = machine.definition
machine_entry = machine.definition.getMetaDataEntry("machine")
if machine_entry is not None:
container_registry = ContainerRegistry.getInstance()
whole_machine_definition = container_registry.findDefinitionContainers(id = machine_entry)[0]
quality_changes_machine_definition_id = "fdmprinter"
if whole_machine_definition.getMetaDataEntry("has_machine_quality"):
quality_changes_machine_definition_id = machine.definition.getMetaDataEntry("quality_definition",
whole_machine_definition.getId())
qcs = self.findInstanceContainers(type = "quality_changes", definition = quality_changes_machine_definition_id)
qc_groups = {} # map of qc names -> qc containers
for qc in qcs:
qc_name = qc.getName()
if qc_name not in qc_groups:
qc_groups[qc_name] = []
qc_groups[qc_name].append(qc)
# Try to find from the quality changes cura directory too
quality_changes_container = self._findQualityChangesContainerInCuraFolder(machine_quality_changes.getName())
if quality_changes_container:
qc_groups[qc_name].append(quality_changes_container)
for qc_name, qc_list in qc_groups.items():
qc_dict = {"global": None, "extruders": []}
for qc in qc_list:
extruder_position = qc.getMetaDataEntry("position")
if extruder_position is not None:
qc_dict["extruders"].append(qc)
else:
qc_dict["global"] = qc
if qc_dict["global"] is not None and len(qc_dict["extruders"]) == 1:
# Move per-extruder settings
for qc_setting_key in qc_dict["global"].getAllKeys():
settable_per_extruder = machine.getProperty(qc_setting_key, "settable_per_extruder")
if settable_per_extruder:
setting_value = qc_dict["global"].getProperty(qc_setting_key, "value")
setting_definition = machine.getSettingDefinition(qc_setting_key)
new_instance = SettingInstance(setting_definition, definition_changes)
new_instance.setProperty("value", setting_value)
new_instance.resetState() # Ensure that the state is not seen as a user state.
qc_dict["extruders"][0].addInstance(new_instance)
qc_dict["extruders"][0].setDirty(True)
qc_dict["global"].removeInstance(qc_setting_key, postpone_emit=True)
# Set next stack at the end
extruder_stack.setNextStack(machine)
return extruder_stack
def _findQualityChangesContainerInCuraFolder(self, name: str) -> Optional[InstanceContainer]:
quality_changes_dir = Resources.getPath(cura.CuraApplication.CuraApplication.ResourceTypes.QualityChangesInstanceContainer)
instance_container = None
for item in os.listdir(quality_changes_dir):
file_path = os.path.join(quality_changes_dir, item)
if not os.path.isfile(file_path):
continue
parser = configparser.ConfigParser(interpolation = None)
try:
parser.read([file_path])
except Exception:
# Skip, it is not a valid stack file
continue
if not parser.has_option("general", "name"):
continue
if parser["general"]["name"] == name:
# Load the container
container_id = os.path.basename(file_path).replace(".inst.cfg", "")
if self.findInstanceContainers(id = container_id):
# This container is already in the registry, skip it
continue
instance_container = InstanceContainer(container_id)
with open(file_path, "r", encoding = "utf-8") as f:
serialized = f.read()
try:
instance_container.deserialize(serialized, file_path)
except ContainerFormatError:
Logger.logException("e", "Unable to deserialize InstanceContainer %s", file_path)
continue
self.addContainer(instance_container)
break
return instance_container
# Fix the extruders that were upgraded to ExtruderStack instances during addContainer.
# The stacks are now responsible for setting the next stack on deserialize. However,
# due to problems with loading order, some stacks may not have the proper next stack
# set after upgrading, because the proper global stack was not yet loaded. This method
# makes sure those extruders also get the right stack set.
def _connectUpgradedExtruderStacksToMachines(self) -> None:
extruder_stacks = self.findContainers(container_type = ExtruderStack.ExtruderStack)
for extruder_stack in extruder_stacks:
if extruder_stack.getNextStack():
# Has the right next stack, so ignore it.
continue
machines = ContainerRegistry.getInstance().findContainerStacks(id = extruder_stack.getMetaDataEntry("machine", ""))
if machines:
extruder_stack.setNextStack(machines[0])
else:
Logger.log("w", "Could not find machine {machine} for extruder {extruder}", machine = extruder_stack.getMetaDataEntry("machine"), extruder = extruder_stack.getId())
# Override just for the type.
@classmethod
@override(ContainerRegistry)
def getInstance(cls, *args, **kwargs) -> "CuraContainerRegistry":
return cast(CuraContainerRegistry, super().getInstance(*args, **kwargs))
| lgpl-3.0 | 888,320,683,751,068,900 | 57.511749 | 321 | 0.624841 | false |
agdsn/hades | src/hades/common/cli.py | 1 | 5189 | """Functionality for the Hades command-line utilities in :mod:`hades.bin`."""
import argparse
import logging.handlers
import os
import sys
import textwrap
from gettext import gettext as _
from hades import constants
class ArgumentParser(argparse.ArgumentParser):
"""ArgumentParser subclass that exists with :data:`os.EX_USAGE` exit code if
parsing fails."""
def error(self, message):
self.print_usage(sys.stderr)
args = {'prog': self.prog, 'message': message}
self.exit(os.EX_USAGE, _('%(prog)s: error: %(message)s\n') % args)
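# Illustrative note (not part of the original module): unlike the stock
# argparse.ArgumentParser, which exits with status 2 on a parse error, this
# subclass terminates with os.EX_USAGE (64 on most POSIX systems), e.g.:
#
#     ArgumentParser(prog='hades-example').parse_args(['--no-such-flag'])
#     # -> prints usage to stderr and raises SystemExit(os.EX_USAGE)
#
# 'hades-example' is a made-up program name used only for this sketch.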
class VersionAction(argparse.Action):
# noinspection PyShadowingBuiltins
def __init__(self,
option_strings,
version_info=None,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help="show program's version number, configure options, copyright notice and exit"):
super(VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
self.version_info = version_info
def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace, values, option_string=None):
version_info = self.version_info
print(version_info)
parser.exit()
parser = ArgumentParser(add_help=False)
parser.add_argument('-c', '--config', default=None, help="Path to config file")
parser.add_argument('-v', '--verbose', dest='verbosity',
default=None, action='count', help='Be more verbose')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0, help='Be quiet')
parser.add_argument(
'-V', '--version', action=VersionAction, version_info=textwrap.dedent(
"""\
{PACKAGE_NAME} version {PACKAGE_VERSION}
Configure Options: {CONFIGURE_ARGS}
Copyright (c) 2015-2020 {PACKAGE_AUTHOR}
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
).rstrip().format(
PACKAGE_NAME=constants.PACKAGE_NAME,
PACKAGE_VERSION=constants.PACKAGE_VERSION,
CONFIGURE_ARGS=constants.CONFIGURE_ARGS,
PACKAGE_AUTHOR=constants.PACKAGE_AUTHOR,
)
)
parser.add_argument('--syslog', nargs='?', const='/dev/log',
help="Log to syslog instead of stderr. A path to the log "
"socket may be provided, defaults to /dev/log "
"otherwise")
VERBOSITY_LEVELS = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
DEFAULT_VERBOSITY = 1
def setup_cli_logging(program, args):
"""
Set up logging for CLI applications that do not configure logging
themselves.
The log level is taken from the command-line options parsed with
:data:`parser`, falling back to the :std:envvar:`HADES_VERBOSITY` environment
variable, and finally to the default value :data:`DEFAULT_VERBOSITY`.
Flask and Celery are quite opinionated about logging, so this function
should probably not be called in their launchers.
:param program: The name of the program
:param args: The parsed arguments of the program with :data:`parser` or a
subparser.
"""
reset_cli_logging()
if args.verbosity is None:
verbosity = os.environ.get('HADES_VERBOSITY', DEFAULT_VERBOSITY)
try:
verbosity = int(verbosity)
except ValueError:
verbosity = DEFAULT_VERBOSITY
else:
verbosity = args.verbosity
effective_verbosity = max(0, min(len(VERBOSITY_LEVELS) - 1, verbosity))
level = VERBOSITY_LEVELS[effective_verbosity]
if level <= logging.DEBUG:
fmt = ("[%(asctime)s] %(levelname)s in %(filename)s:%(lineno)d: "
"%(message)s")
else:
fmt = "%(message)s"
stderr_handler = logging.StreamHandler(stream=sys.stderr)
stderr_handler.name = "stderr"
if args.syslog is not None:
# Also log critical messages to stderr
stderr_handler.setLevel(logging.CRITICAL)
syslog_handler = logging.handlers.SysLogHandler(address=args.syslog)
syslog_handler.name = "syslog"
handlers = [syslog_handler, stderr_handler]
else:
handlers = [stderr_handler]
logging.basicConfig(level=level, style='%', format=fmt, handlers=handlers)
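# Minimal usage sketch (an illustration, not part of the original module): a
# launcher in hades.bin would typically combine the shared parser with this
# logging setup. The program name 'hades-example' is a made-up placeholder.
#
#     def main():
#         args = parser.parse_args()
#         setup_cli_logging('hades-example', args)
#         ...  # program-specific work
#
#     if __name__ == '__main__':
#         main()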
def reset_cli_logging():
"""Reset root logger configuration"""
root = logging.root
for h in root.handlers:
try:
h.acquire()
h.flush()
h.close()
except (OSError, ValueError):
pass
finally:
h.release()
root.removeHandler(h)
for f in root.filters:
root.removeFilter(f)
| mit | -2,648,820,304,603,994,000 | 36.601449 | 115 | 0.63943 | false |
etingof/pyasn1-modules | tests/test_rfc3657.py | 2 | 6803 | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc3657
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5751
class EnvelopedDataTestCase(unittest.TestCase):
env_data_pem_text = """\
MIIFfwYJKoZIhvcNAQcDoIIFcDCCBWwCAQIxU6JRAgEEMCMEECBlcTFnxBsPlsug
4KOCj78YDzIwMTkwOTEyMTIwMDAwWjANBgsqgwiMmks9AQEDAgQYS3mK9jQmvth1
iuBV8PEa89ICvmoomJCvMIIFEAYJKoZIhvcNAQcBMB8GCyqDCIyaSz0BAQECBBBC
T0dVU0lWX0JPR1VTSVYhgIIE4HPHsXoYyQ/4LRDiK4OrSuRJmmuDye5fH/hLcgw/
330Gsl1QBs9jF1CEDBM5ki657K/TRMl78Rqb3LIu5lfLQ8WVNGLsoQPwvxzIexGg
ShtYYwu8TcPiESFMa20SWpDEG8zFlmCbqQuc0buPxnvYviVThoBEthNC+S2Umed8
JpxwNKJbNTx5dxd2dkDNwpHsKgNzT9cGl0NF129Dspehqtdge5LJu3rj1gNynLRI
32AQ+pwU+sEHee6wDHhU5OWnHlndkm/9MTKY3woOhs1/KQFlRFPC6k71ZpUlncd3
93wLVIImfoMe4zWPAOnbpZ/M7zEJ95rTwwmudBs0qwMfCa3h0Vkg69w6fBHyc1IH
8u3VpSPcbOW4dUzJBDJPgB1kObAV02ZA4FQEuZtZiG13u3c7sSrHxsY1rtXssvSe
+5rThqPWgDqmH8b/yPGEHIFh03kHCDt/UZrdkLCO7a0WhCdY4I9hNU6OYEQmyEFs
0LsqEumn34Lv/XcD1wgLdPtF65zub4Wil/0Vpu73vIWLIk9LyNIXQSd6w0ZHUvVS
+jZZ1zrqIQKhKvG97NpKAYoHa4tOdoXHgBJUxw/uAOKkQ4jC5RS5UKqCZaQcArRD
2bCEEsutiuyf06MMcWm+RaBY1EwuX+/cT0D6CsWHYFAeQHgLuR4HVk5+PVKoOL/7
KUz0jUU5gzFVcmfaocyX5A6R90yggBObefcOIEj3v+5fjHkppfTvi/R03fVZ4Nyw
WyHbN7kOHHy8skJpcvNaqSY0dfkb8KOOoTptJH9rCBYtFlC5j/18y8Om9Um4h3/4
6hYO0xU8izJDzDzJnO/5KS5mGyskweIp3mrE1C/mw68LvrksxQI03CPtbM+FqOKe
0VcsAQykiOTnG3d4jLeF1iVrc9CgV+pwc5VfgQUwsGhjAFOCKTwWDrr3Je0yVsfz
gwY2zuM5uE/+usOSBt7SqbFTLOCba4fJrVVwi0wZig88owVTdl/ACxl2qyLUYC2u
5PNJSY6kx8Cgo4gDJk/3oeuys8JqgaufvKybl5GsdDaF3A7usZAjDR1EAWHZ7JGi
agtqbvISLD0zq4e4nmEhLnIRb7u5SNBPqe8qVuuQjIsvmP0ZuTlnh84ypFOQGz7c
fzHtr6UEQoGj8HImbp8diL4tflmFAVNaRjQzu18+2vFB2w1EZIe2/uNLs9ne2EIy
oK2Qb+mMCwJsNS0xOG0/TzPZ+y0Tp1/LupLHovMosPIGXlbvqZVh2xftDvbIigIM
WZQZ2tFxYD6Xc4zA00v7H0yGF1pRY+3GpobJkw0Y6ORtgdtdnr2ipioIeQCy0hUp
POmTeSr0L3H7KfNY7yQgZg0ra7FIEjM8tDoNqrhznetYUU1ZWM8Lyb3zMxxinSFs
GFGx2TiqPyixJNxN+lPT5D6GRhC9mXgh+BfVod5oINJJwXxJpT5xnsZgW8ujVxiu
1Vt5esXCZaXTGlyjVTH5dmCvJP9+B8n7dOimmCxCbMQKpNaZixJhoXWQtTgKqL1Q
f9WoEs6TDGgfTllqjbE4w3O7ZA7fAWe9jbAGwiPV5rF/NVvjaj2+ibtXbSNPW59d
dy1/2WzknVYnEHF0qZdBZ02Wh4ByXUC3FNvDu8hRTm5aq73DCqXLXUwNU8BvS1xB
bbRq5aYI2Rd3naNAns9dHqSvkg==
"""
def setUp(self):
self.asn1Spec = rfc5652.ContentInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.env_data_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertEqual(rfc5652.id_envelopedData, asn1Object['contentType'])
ed, rest = der_decoder(
asn1Object['content'], asn1Spec=rfc5652.EnvelopedData())
self.assertFalse(rest)
self.assertTrue(ed.prettyPrint())
self.assertEqual(asn1Object['content'], der_encoder(ed))
kwa = ed['recipientInfos'][0]['kekri']['keyEncryptionAlgorithm']
self.assertEqual(rfc3657.id_camellia128_wrap, kwa['algorithm'])
cea = ed['encryptedContentInfo']['contentEncryptionAlgorithm']
self.assertEqual(rfc3657.id_camellia128_cbc, cea['algorithm'])
param, rest = der_decoder(
cea['parameters'], asn1Spec=rfc3657.Camellia_IV())
self.assertFalse(rest)
self.assertTrue(param.prettyPrint())
self.assertEqual(cea['parameters'], der_encoder(param))
iv = rfc3657.Camellia_IV(hexValue='424f47555349565f424f475553495621')
self.assertEqual(iv, param)
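# Note added for clarity (not in the original test): the expected IV above is
# the 16 ASCII bytes of 'BOGUSIV_BOGUSIV!', i.e. a 128-bit dummy value that
# matches the Camellia block size.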
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.env_data_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
self.assertTrue(asn1Object['contentType'] in rfc5652.cmsContentTypesMap.keys())
kekri = asn1Object['content']['recipientInfos'][0]['kekri']
kwa = kekri['keyEncryptionAlgorithm']
self.assertEqual(rfc3657.id_camellia128_wrap, kwa['algorithm'])
eci = asn1Object['content']['encryptedContentInfo']
cea = eci['contentEncryptionAlgorithm']
self.assertEqual(rfc3657.id_camellia128_cbc, cea['algorithm'])
iv = rfc3657.Camellia_IV(hexValue='424f47555349565f424f475553495621')
self.assertEqual(iv, cea['parameters'])
class SMIMECapabilitiesTestCase(unittest.TestCase):
smime_capabilities_pem_text = """\
MGYwDwYLKoMIjJpLPQEBAQIFADAPBgsqgwiMmks9AQEBAwUAMA8GCyqDCIyaSz0B
AQEEBQAwDwYLKoMIjJpLPQEBAwIFADAPBgsqgwiMmks9AQEDAwUAMA8GCyqDCIya
Sz0BAQMEBQA=
"""
def setUp(self):
self.asn1Spec = rfc5751.SMIMECapabilities()
def testDerCodec(self):
alg_oid_list = [
rfc3657.id_camellia128_cbc,
rfc3657.id_camellia192_cbc,
rfc3657.id_camellia256_cbc,
rfc3657.id_camellia128_wrap,
rfc3657.id_camellia192_wrap,
rfc3657.id_camellia256_wrap,
]
substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
param = der_encoder(rfc3657.CamelliaSMimeCapability(""))
count = 0
for cap in asn1Object:
self.assertEqual(cap['parameters'], param)
self.assertTrue(cap['capabilityID'] in alg_oid_list)
count += 1
self.assertEqual(count, 6)
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.smime_capabilities_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
param = rfc3657.CamelliaSMimeCapability("")
count = 0
for cap in asn1Object:
self.assertTrue(cap['capabilityID'] in rfc5751.smimeCapabilityMap.keys())
self.assertEqual(cap['parameters'], param)
count += 1
self.assertEqual(count, 6)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| bsd-2-clause | -370,215,760,429,315,650 | 39.736527 | 87 | 0.759077 | false |
souravbadami/oppia | core/domain/exp_jobs_one_off_test.py | 1 | 92168 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Exploration-related jobs."""
import ast
import datetime
import logging
from constants import constants
from core import jobs_registry
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_jobs_one_off
from core.domain import exp_services
from core.domain import html_validation_service
from core.domain import rights_manager
from core.domain import state_domain
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(job_models, exp_models, base_models, classifier_models) = (
models.Registry.import_models([
models.NAMES.job, models.NAMES.exploration, models.NAMES.base_model,
models.NAMES.classifier]))
memcache_services = models.Registry.import_memcache_services()
search_services = models.Registry.import_search_services()
taskqueue_services = models.Registry.import_taskqueue_services()
# This mock should be used only in ExplorationContentValidationJobForCKEditor
# and InteractionCustomizationArgsValidationJob.
# The first job validates the HTML strings and outputs the invalid strings.
# If we do not mock the RTE validation while updating states and saving the
# exploration, the validation of the subtitled HTML in the state will fail,
# causing the job itself to fail.
# The second job validates the customization args in the HTML; if the mock is
# not used while updating states and saving explorations, the validation of
# the subtitled HTML in the state will fail, causing the job itself to fail.
def mock_validate(unused_self):
pass
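# Illustrative sketch (not part of the original file): in the tests for the two
# jobs named above, the mock is typically swapped in around the state updates
# so that saving the exploration does not trip the subtitled-HTML validation,
# for example:
#
#     with self.swap(state_domain.SubtitledHtml, 'validate', mock_validate):
#         exp_services.save_new_exploration(owner_id, exploration)
#
# The exact swap target and the owner_id/exploration names are assumptions
# made only for this illustration.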
def run_job_for_deleted_exp(
self, job_class, check_error=False,
error_type=None, error_msg=None, function_to_be_called=None,
exp_id=None):
"""Helper function to run job for a deleted exploration and check the
output or error condition.
"""
job_id = job_class.create_new()
# Check there is one job in the taskqueue corresponding to
# delete_exploration_from_subscribed_users.
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
job_class.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 2)
self.process_and_flush_pending_tasks()
if check_error:
with self.assertRaisesRegexp(error_type, error_msg):
function_to_be_called(exp_id)
else:
self.assertEqual(job_class.get_output(job_id), [])
class ExpSummariesCreationOneOffJobTest(test_utils.GenericTestBase):
"""Tests for ExpSummary aggregations."""
ONE_OFF_JOB_MANAGERS_FOR_TESTS = [
exp_jobs_one_off.ExpSummariesCreationOneOffJob]
# Specify explorations that will be used in the test.
EXP_SPECS = [{
'category': 'Category A',
'title': 'Title 1'
}, {
'category': 'Category B',
'title': 'Title 2'
}, {
'category': 'Category C',
'title': 'Title 3'
}, {
'category': 'Category A',
'title': 'Title 4'
}, {
'category': 'Category C',
'title': 'Title 5'
}]
def setUp(self):
super(ExpSummariesCreationOneOffJobTest, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.signup(self.VOICE_ARTIST_EMAIL, self.VOICE_ARTIST_USERNAME)
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.voice_artist_id = self.get_user_id_from_email(
self.VOICE_ARTIST_EMAIL)
self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.login(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
def test_all_exps_public(self):
"""Test summary batch job if all explorations are public."""
self._run_batch_job_once_and_verify_output(
self.EXP_SPECS,
default_status=rights_manager.ACTIVITY_STATUS_PUBLIC)
def test_all_exps_private(self):
"""Test summary batch job if all explorations are private."""
self._run_batch_job_once_and_verify_output(
self.EXP_SPECS,
default_status=rights_manager.ACTIVITY_STATUS_PRIVATE)
def _run_batch_job_once_and_verify_output(
self, exp_specs,
default_title='A title',
default_category='A category',
default_status=rights_manager.ACTIVITY_STATUS_PUBLIC):
"""Run batch job for creating exploration summaries once and verify its
output. exp_specs is a list of dicts with exploration specifications.
Allowed keys are category, status, title. If a key is not specified,
the default value is used.
"""
with self.swap(
jobs_registry, 'ONE_OFF_JOB_MANAGERS',
self.ONE_OFF_JOB_MANAGERS_FOR_TESTS
):
default_spec = {
'title': default_title,
'category': default_category,
'status': default_status
}
# Create and delete an exploration (to make sure job handles
# deleted explorations correctly).
exp_id = '100'
self.save_new_valid_exploration(
exp_id,
self.admin_id,
title=default_spec['title'],
category=default_spec['category'])
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exp_services.delete_exploration(self.admin_id, exp_id)
# Get dummy explorations.
num_exps = len(exp_specs)
expected_job_output = {}
for ind in range(num_exps):
exp_id = str(ind)
spec = default_spec
spec.update(exp_specs[ind])
exploration = exp_domain.Exploration.create_default_exploration(
exp_id, title=spec['title'], category=spec['category'])
exploration.tags = ['computer science', 'analysis', 'a b c']
exp_services.save_new_exploration(self.admin_id, exploration)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
rights_manager.assign_role_for_exploration(
self.admin, exp_id, self.voice_artist_id, 'voice artist')
rights_manager.assign_role_for_exploration(
self.admin, exp_id, self.viewer_id, 'viewer')
rights_manager.assign_role_for_exploration(
self.admin, exp_id, self.editor_id, 'editor')
# Publish exploration.
if spec['status'] == rights_manager.ACTIVITY_STATUS_PUBLIC:
rights_manager.publish_exploration(self.admin, exp_id)
# Do not include user_id here, so all explorations are not
# editable for now (will be updated depending on user_id
# in galleries).
exp_rights_model = exp_models.ExplorationRightsModel.get(
exp_id)
exploration = exp_fetchers.get_exploration_by_id(exp_id)
exploration_model_last_updated = exploration.last_updated
exploration_model_created_on = exploration.created_on
first_published_msec = (
exp_rights_model.first_published_msec)
# Manually create the expected summary specifying title,
# category, etc.
expected_job_output[exp_id] = exp_domain.ExplorationSummary(
exp_id,
spec['title'],
spec['category'],
exploration.objective,
exploration.language_code,
exploration.tags,
feconf.get_empty_ratings(),
feconf.EMPTY_SCALED_AVERAGE_RATING,
spec['status'],
exp_rights_model.community_owned,
exp_rights_model.owner_ids,
exp_rights_model.editor_ids,
exp_rights_model.voice_artist_ids,
exp_rights_model.viewer_ids,
[self.admin_id],
{self.admin_id: 1},
exploration.version,
exploration_model_created_on,
exploration_model_last_updated,
first_published_msec)
# Note: Calling constructor for fields that are not required
# and have no default value does not work, because
# unspecified fields will be empty list in
# expected_job_output but will be unspecified in
# actual_job_output.
if exploration.tags:
expected_job_output[exp_id].tags = exploration.tags
if exp_rights_model.owner_ids:
expected_job_output[exp_id].owner_ids = (
exp_rights_model.owner_ids)
if exp_rights_model.editor_ids:
expected_job_output[exp_id].editor_ids = (
exp_rights_model.editor_ids)
if exp_rights_model.voice_artist_ids:
expected_job_output[exp_id].voice_artist_ids = (
exp_rights_model.voice_artist_ids)
if exp_rights_model.viewer_ids:
expected_job_output[exp_id].viewer_ids = (
exp_rights_model.viewer_ids)
if exploration.version:
expected_job_output[exp_id].version = (
exploration.version)
# Run batch job.
job_id = (
exp_jobs_one_off.ExpSummariesCreationOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesCreationOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Get and check job output.
actual_job_output = exp_services.get_all_exploration_summaries()
self.assertEqual(
actual_job_output.keys(), expected_job_output.keys())
# Note: 'exploration_model_last_updated' is not expected to be the
# same, because it is now read from the version model representing
# the exploration's history snapshot, and not the ExplorationModel.
simple_props = ['id', 'title', 'category', 'objective',
'language_code', 'tags', 'ratings', 'status',
'community_owned', 'owner_ids',
'editor_ids', 'voice_artist_ids', 'viewer_ids',
'contributor_ids', 'contributors_summary',
'version', 'exploration_model_created_on']
for exp_id in actual_job_output:
for prop in simple_props:
self.assertEqual(
getattr(actual_job_output[exp_id], prop),
getattr(expected_job_output[exp_id], prop))
def test_exp_summaries_creation_job_output(self):
"""Test that ExpSummariesCreationOneOff job output is correct."""
with self.swap(
jobs_registry, 'ONE_OFF_JOB_MANAGERS',
self.ONE_OFF_JOB_MANAGERS_FOR_TESTS
):
exp_id1 = '1'
self.save_new_valid_exploration(
exp_id1,
self.admin_id,
title='title',
category='category')
rights_manager.publish_exploration(self.admin, exp_id1)
exp_id2 = '2'
self.save_new_valid_exploration(
exp_id2,
self.admin_id,
title='title',
category='category')
rights_manager.publish_exploration(self.admin, exp_id2)
exp_services.delete_exploration(self.admin_id, exp_id2)
# Run ExpSummariesCreationOneOff job on sample exploration.
job_id = (
exp_jobs_one_off.ExpSummariesCreationOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesCreationOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ExpSummariesCreationOneOffJob.get_output(
job_id))
expected_output = ['[u\'SUCCESS\', 1]']
self.assertEqual(actual_output, expected_output)
class ExpSummariesContributorsOneOffJobTests(test_utils.GenericTestBase):
ONE_OFF_JOB_MANAGERS_FOR_TESTS = [
exp_jobs_one_off.ExpSummariesContributorsOneOffJob]
EXP_ID = 'exp_id'
USERNAME_A = 'usernamea'
USERNAME_B = 'usernameb'
EMAIL_A = '[email protected]'
EMAIL_B = '[email protected]'
def setUp(self):
super(ExpSummariesContributorsOneOffJobTests, self).setUp()
self.signup(self.EMAIL_A, self.USERNAME_A)
self.user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
self.user_b_id = self.get_user_id_from_email(self.EMAIL_B)
def test_contributors_for_valid_contribution(self):
"""Test that if only one commit is made, that the contributor
list consists of that contributor's user id.
"""
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id)
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual(
[self.user_a_id], exploration_summary.contributor_ids)
def test_repeat_contributors(self):
"""Test that if the same user makes more than one commit that changes
the content of an exploration, the user is only represented once in the
list of contributors for that exploration.
"""
# Have one user make two commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Original Title')
exploration_model = exp_models.ExplorationModel.get(
self.EXP_ID, strict=True, version=None)
exploration_model.title = 'New title'
exploration_model.commit(
self.user_a_id, 'Changed title.', [])
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify that the length of the contributor list is one, and that
# the list contains the user who made these commits.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual(
[self.user_a_id], exploration_summary.contributor_ids)
def test_contributors_with_only_reverts_not_counted(self):
"""Test that contributors who have only done reverts do not
have their user id appear in the contributor list.
"""
# Have one user make two commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Original Title')
change_list = [exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': 'New title'
})]
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, change_list, 'Changed title.')
# Have the second user revert version 2 to version 1.
exp_services.revert_exploration(self.user_b_id, self.EXP_ID, 2, 1)
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify that the committer list does not contain the user
# who only reverted.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual([self.user_a_id], exploration_summary.contributor_ids)
def test_nonhuman_committers_not_counted(self):
"""Test that only human committers are counted as contributors."""
# Create a commit with the system user id.
exploration = self.save_new_valid_exploration(
self.EXP_ID, feconf.SYSTEM_COMMITTER_ID, title='Original Title')
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Check that the system id was not added to the exploration's
# contributor ids.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertNotIn(
feconf.SYSTEM_COMMITTER_ID,
exploration_summary.contributor_ids)
# Create a commit with the migration bot user id.
exploration_model = exp_models.ExplorationModel.get(
self.EXP_ID, strict=True, version=None)
exploration_model.title = 'New title'
exploration_model.commit(
feconf.MIGRATION_BOT_USERNAME, 'Changed title.', [])
# Run the job to compute the contributor ids.
job_id = (
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.create_new())
exp_jobs_one_off.ExpSummariesContributorsOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Check that the migration bot id was not added to the exploration's
# contributor ids.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertNotIn(
feconf.MIGRATION_BOT_USERNAME,
exploration_summary.contributor_ids)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exp_id = '100'
self.save_new_valid_exploration(exp_id, self.user_a_id)
exp_services.delete_exploration(self.user_a_id, exp_id)
run_job_for_deleted_exp(
self, exp_jobs_one_off.ExpSummariesContributorsOneOffJob,
function_to_be_called=exp_fetchers.get_exploration_summary_by_id,
exp_id=exp_id)
class ExplorationContributorsSummaryOneOffJobTests(test_utils.GenericTestBase):
ONE_OFF_JOB_MANAGERS_FOR_TESTS = [
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob]
EXP_ID = 'exp_id'
USERNAME_A = 'usernamea'
USERNAME_B = 'usernameb'
EMAIL_A = '[email protected]'
EMAIL_B = '[email protected]'
def setUp(self):
super(ExplorationContributorsSummaryOneOffJobTests, self).setUp()
self.signup(self.EMAIL_A, self.USERNAME_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
self.user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.user_b_id = self.get_user_id_from_email(self.EMAIL_B)
def test_contributors_for_valid_nonrevert_contribution(self):
"""Test that if only non-revert commits are made by
contributor then the contributions summary shows same
exact number of commits for that contributor's ID.
"""
# Let USER A make three commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
})], 'Changed title.')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'New Objective'
})], 'Changed Objective.')
# Run the job to compute contributors summary.
job_id = (
exp_jobs_one_off
.ExplorationContributorsSummaryOneOffJob.create_new()
)
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual(
3, exploration_summary.contributors_summary[self.user_a_id])
def test_contributors_with_only_reverts_not_included(self):
"""Test that if only reverts are made by contributor then the
contributions summary shouldn’t contain that contributor’s ID.
"""
# Let USER A make three commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title 1')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
})], 'Changed title.')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'New Objective'
})], 'Changed Objective.')
# Let the second user revert version 3 to version 2.
exp_services.revert_exploration(self.user_b_id, self.EXP_ID, 3, 2)
# Run the job to compute the contributors summary.
job_id = (
exp_jobs_one_off
.ExplorationContributorsSummaryOneOffJob.create_new()
)
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
# Check that the contributors_summary does not contain user_b_id.
self.assertNotIn(
self.user_b_id, exploration_summary.contributors_summary)
# Check that User A has only 2 commits after User B has reverted
# to version 2.
self.assertEqual(
2, exploration_summary.contributors_summary[self.user_a_id])
def test_reverts_not_counted(self):
"""Test that if both non-revert commits and revert are
made by contributor then the contributions summary shows
only non-revert commits for that contributor. However,
the commits made after the version to which we have reverted
shouldn't be counted either.
"""
# Let USER A make 3 non-revert commits.
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'New Exploration Title'
})], 'Changed title.')
exp_services.update_exploration(
self.user_a_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'New Objective'
})], 'Changed Objective.')
# Let USER A revert version 3 to version 2.
exp_services.revert_exploration(self.user_a_id, self.EXP_ID, 3, 2)
# Run the job to compute the contributor summary.
job_id = (
exp_jobs_one_off
.ExplorationContributorsSummaryOneOffJob.create_new()
)
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Check that USER A's number of contributions is equal to 2.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
self.assertEqual(
2, exploration_summary.contributors_summary[self.user_a_id])
def test_nonhuman_committers_not_counted(self):
"""Test that only human committers are counted as contributors."""
# Create a commit with the system user id.
exploration = self.save_new_valid_exploration(
self.EXP_ID, feconf.SYSTEM_COMMITTER_ID, title='Original Title')
# Create commits with all the system user ids.
for system_id in constants.SYSTEM_USER_IDS:
exp_services.update_exploration(
system_id, self.EXP_ID, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'Title changed by %s' % system_id
})], 'Changed title.')
# Run the job to compute the contributor summary.
job_id = (
exp_jobs_one_off
.ExplorationContributorsSummaryOneOffJob.create_new())
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Check that no system id was added to the exploration's
# contributor's summary.
exploration_summary = exp_fetchers.get_exploration_summary_by_id(
exploration.id)
for system_id in constants.SYSTEM_USER_IDS:
self.assertNotIn(
system_id,
exploration_summary.contributors_summary)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exp_id = '100'
self.save_new_valid_exploration(exp_id, self.user_a_id)
exp_services.delete_exploration(self.user_a_id, exp_id)
run_job_for_deleted_exp(
self, exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob,
function_to_be_called=exp_fetchers.get_exploration_summary_by_id,
exp_id=exp_id)
def test_exploration_contributors_summary_job_output(self):
"""Test that ExplorationContributorsSummaryOneOff job output is
correct.
"""
self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title')
# Run the ExplorationContributorsSummaryOneOff job.
job_id = (
exp_jobs_one_off
.ExplorationContributorsSummaryOneOffJob.create_new())
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ExplorationContributorsSummaryOneOffJob.get_output(
job_id))
expected_output = ['[u\'SUCCESS\', 1]']
self.assertEqual(actual_output, expected_output)
class OneOffExplorationFirstPublishedJobTests(test_utils.GenericTestBase):
EXP_ID = 'exp_id'
def setUp(self):
super(OneOffExplorationFirstPublishedJobTests, self).setUp()
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.admin = user_services.UserActionsInfo(self.admin_id)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.owner = user_services.UserActionsInfo(self.owner_id)
def test_first_published_time_of_exploration_that_is_unpublished(self):
"""This tests that, if an exploration is published, unpublished, and
then published again, the job uses the first publication time as the
value for first_published_msec.
"""
self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
rights_manager.publish_exploration(self.owner, self.EXP_ID)
job_class = exp_jobs_one_off.ExplorationFirstPublishedOneOffJob
job_id = job_class.create_new()
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
exploration_rights = rights_manager.get_exploration_rights(self.EXP_ID)
# Test to see whether first_published_msec was correctly updated.
exp_first_published = exploration_rights.first_published_msec
exp_rights_model = exp_models.ExplorationRightsModel.get(self.EXP_ID)
last_updated_time_msec = utils.get_time_in_millisecs(
exp_rights_model.last_updated)
self.assertLess(
exp_first_published, last_updated_time_msec)
rights_manager.unpublish_exploration(self.admin, self.EXP_ID)
rights_manager.publish_exploration(self.owner, self.EXP_ID)
job_id = job_class.create_new()
exp_jobs_one_off.ExplorationFirstPublishedOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Test to see whether first_published_msec remains the same despite the
# republication.
exploration_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertEqual(
exp_first_published, exploration_rights.first_published_msec)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
self.save_new_valid_exploration(
self.EXP_ID, self.owner_id, end_state_name='End')
rights_manager.publish_exploration(self.owner, self.EXP_ID)
exp_services.delete_exploration(self.owner_id, self.EXP_ID)
run_job_for_deleted_exp(
self, exp_jobs_one_off.ExplorationFirstPublishedOneOffJob,
check_error=True,
error_type=base_models.BaseModel.EntityNotFoundError,
error_msg=(
'Entity for class ExplorationRightsModel with id '
'exp_id not found'),
function_to_be_called=rights_manager.get_exploration_rights,
exp_id=self.EXP_ID)
class ExplorationValidityJobManagerTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(ExplorationValidityJobManagerTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_validation_errors_are_not_raised_for_valid_exploration(self):
"""Checks validation errors are not raised for a valid exploration."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category',
objective='Test Exploration')
exploration.add_states(['End'])
intro_state = exploration.states['Introduction']
end_state = exploration.states['End']
intro_state.update_interaction_id('TextInput')
end_state.update_interaction_id('EndExploration')
default_outcome_dict = {
'dest': 'End',
'feedback': {
'content_id': 'default_outcome',
'html': '<p>Introduction</p>'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
}
intro_state.update_interaction_default_outcome(default_outcome_dict)
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(self.albert_id, exploration)
# Start ExplorationValidityJobManager job on unpublished exploration.
job_id = exp_jobs_one_off.ExplorationValidityJobManager.create_new()
exp_jobs_one_off.ExplorationValidityJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ExplorationValidityJobManager.get_output(
job_id))
self.assertEqual(actual_output, [])
self.set_admins([self.ALBERT_NAME])
owner = user_services.UserActionsInfo(self.albert_id)
rights_manager.publish_exploration(owner, self.VALID_EXP_ID)
# Start ExplorationValidityJobManager job on published exploration.
job_id = exp_jobs_one_off.ExplorationValidityJobManager.create_new()
exp_jobs_one_off.ExplorationValidityJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ExplorationValidityJobManager.get_output(
job_id))
self.assertEqual(actual_output, [])
def test_strict_validation_errors_are_raised_for_published_exploration(
self):
"""Checks validation errors are not present for valid exploration."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
# Start ExplorationValidityJobManager job on unpublished exploration.
job_id = exp_jobs_one_off.ExplorationValidityJobManager.create_new()
exp_jobs_one_off.ExplorationValidityJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ExplorationValidityJobManager.get_output(
job_id))
self.assertEqual(actual_output, [])
self.set_admins([self.ALBERT_NAME])
owner = user_services.UserActionsInfo(self.albert_id)
rights_manager.publish_exploration(owner, self.VALID_EXP_ID)
# Start ExplorationValidityJobManager job on published exploration.
job_id = exp_jobs_one_off.ExplorationValidityJobManager.create_new()
exp_jobs_one_off.ExplorationValidityJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ExplorationValidityJobManager.get_output(
job_id))
expected_output = [(
'[u\'exp_id0\', '
'[u\'This state does not have any interaction specified.\']]'
)]
self.assertEqual(actual_output, expected_output)
exploration.states['Introduction'].update_interaction_id(
'TextInput')
exp_services.save_new_exploration(self.albert_id, exploration)
rights_manager.publish_exploration(owner, self.VALID_EXP_ID)
# Start ExplorationValidityJobManager job on published exploration.
job_id = exp_jobs_one_off.ExplorationValidityJobManager.create_new()
exp_jobs_one_off.ExplorationValidityJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off
.ExplorationValidityJobManager.get_output(job_id))
expected_output = [(
'[u\'exp_id0\', '
'[u"Please fix the following issues before saving this '
'exploration: 1. It is impossible to complete the exploration '
'from the following states: Introduction '
'2. An objective must be specified (in the \'Settings\' tab). "]]')]
self.assertEqual(actual_output, expected_output)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
self.set_admins([self.ALBERT_NAME])
owner = user_services.UserActionsInfo(self.albert_id)
rights_manager.publish_exploration(owner, self.VALID_EXP_ID)
exp_services.delete_exploration(self.albert_id, self.VALID_EXP_ID)
run_job_for_deleted_exp(
self, exp_jobs_one_off.ExplorationValidityJobManager)
class ExplorationMigrationJobTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(ExplorationMigrationJobTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_migration_job_does_not_convert_up_to_date_exp(self):
"""Tests that the exploration migration job does not convert an
exploration that is already at the latest states schema version.
"""
# Create a new, default exploration that should not be affected by the
# job.
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(self.albert_id, exploration)
self.assertEqual(
exploration.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
yaml_before_migration = exploration.to_yaml()
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify the exploration is exactly the same after migration.
updated_exp = exp_fetchers.get_exploration_by_id(self.VALID_EXP_ID)
self.assertEqual(
updated_exp.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
after_converted_yaml = updated_exp.to_yaml()
self.assertEqual(after_converted_yaml, yaml_before_migration)
def test_migration_job_does_not_have_validation_fail_on_default_exp(self):
"""Tests that the exploration migration job does not have a validation
failure for a default exploration (of states schema version 0), due to
the exploration having a null interaction ID in its initial state.
"""
self.save_new_exp_with_states_schema_v0(
self.NEW_EXP_ID, self.albert_id, self.EXP_TITLE)
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
# Verify the new exploration has been migrated by the job.
updated_exp = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
self.assertEqual(
updated_exp.states_schema_version,
feconf.CURRENT_STATE_SCHEMA_VERSION)
# Ensure the states structure within the exploration was changed.
self.assertNotEqual(
updated_exp.to_dict()['states'], self.VERSION_0_STATES_DICT)
def test_migration_job_skips_deleted_explorations(self):
"""Tests that the exploration migration job skips deleted explorations
and does not attempt to migrate them.
"""
self.save_new_exp_with_states_schema_v0(
self.NEW_EXP_ID, self.albert_id, self.EXP_TITLE)
# Note: This creates a summary based on the upgraded model (which is
# fine). A summary is needed to delete the exploration.
exp_services.create_exploration_summary(
self.NEW_EXP_ID, None)
# Delete the exploration before migration occurs.
exp_services.delete_exploration(self.albert_id, self.NEW_EXP_ID)
# Ensure the exploration is deleted.
with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
# The job running without errors indicates that the deleted exploration is
# being ignored, since otherwise exp_fetchers.get_exploration_by_id
# (used within the job) would raise an error.
self.process_and_flush_pending_tasks()
# Ensure the exploration is still deleted.
with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
def test_exploration_migration_job_output(self):
"""Test that Exploration Migration job output is correct."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
self.save_new_exp_with_states_schema_v0(
self.NEW_EXP_ID, self.albert_id, self.EXP_TITLE)
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ExplorationMigrationJobManager.get_output(job_id))
expected_output = ['[u\'SUCCESS\', 1]']
self.assertEqual(actual_output, expected_output)
def test_migration_job_creates_appropriate_classifier_models(self):
"""Tests that the exploration migration job creates appropriate
classifier data models for explorations.
"""
self.save_new_exp_with_states_schema_v21(
self.NEW_EXP_ID, self.albert_id, self.EXP_TITLE)
exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
initial_state_name = exploration.states.keys()[0]
# Store classifier model for the new exploration.
classifier_model_id = classifier_models.ClassifierTrainingJobModel.create( # pylint: disable=line-too-long
'TextClassifier', 'TextInput', self.NEW_EXP_ID, exploration.version,
datetime.datetime.utcnow(), {}, initial_state_name,
feconf.TRAINING_JOB_STATUS_COMPLETE, None, 1)
# Store training job model for the classifier model.
classifier_models.TrainingJobExplorationMappingModel.create(
self.NEW_EXP_ID, exploration.version, initial_state_name,
classifier_model_id)
# Start migration job on sample exploration.
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
with self.swap(feconf, 'MIN_TOTAL_TRAINING_EXAMPLES', 2):
with self.swap(feconf, 'MIN_ASSIGNED_LABELS', 1):
self.process_and_flush_pending_tasks()
new_exploration = exp_fetchers.get_exploration_by_id(self.NEW_EXP_ID)
initial_state_name = new_exploration.states.keys()[0]
self.assertLess(exploration.version, new_exploration.version)
classifier_exp_mapping_model = classifier_models.TrainingJobExplorationMappingModel.get_models( # pylint: disable=line-too-long
self.NEW_EXP_ID, new_exploration.version,
[initial_state_name])[0]
self.assertEqual(
classifier_exp_mapping_model.job_id, classifier_model_id)
def test_migration_job_fails_with_invalid_exploration(self):
observed_log_messages = []
def _mock_logging_function(msg, *args):
"""Mocks logging.error()."""
observed_log_messages.append(msg % args)
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID)
exploration_model.language_code = 'invalid_language_code'
exploration_model.commit(
self.albert_id, 'Changed language_code.', [])
memcache_services.delete('exploration:%s' % self.VALID_EXP_ID)
job_id = exp_jobs_one_off.ExplorationMigrationJobManager.create_new()
exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
with self.swap(logging, 'error', _mock_logging_function):
self.process_and_flush_pending_tasks()
self.assertEqual(
observed_log_messages,
['Exploration %s failed non-strict validation: '
'Invalid language_code: invalid_language_code'
% (self.VALID_EXP_ID)])
class InteractionAuditOneOffJobTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(InteractionAuditOneOffJobTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_exp_state_pairs_are_produced_for_all_interactions_in_single_exp(
self):
"""Checks (exp, state) pairs are produced for all interactions
when there is a single exploration.
"""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['End'])
intro_state = exploration.states['Introduction']
end_state = exploration.states['End']
intro_state.update_interaction_id('TextInput')
end_state.update_interaction_id('EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(self.albert_id, exploration)
# Start InteractionAuditOneOff job on sample exploration.
job_id = exp_jobs_one_off.InteractionAuditOneOffJob.create_new()
exp_jobs_one_off.InteractionAuditOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.InteractionAuditOneOffJob.get_output(
job_id))
expected_output = [
'[u\'EndExploration\', [u\'exp_id0 End\']]',
'[u\'TextInput\', [u\'exp_id0 Introduction\']]']
self.assertEqual(actual_output, expected_output)
def test_exp_state_pairs_are_produced_for_all_interactions_in_multiple_exps(
self):
"""Checks (exp, state) pairs are produced for all interactions
when there are multiple explorations.
"""
exploration1 = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration1.add_states(['End'])
intro_state = exploration1.states['Introduction']
end_state = exploration1.states['End']
intro_state.update_interaction_id('TextInput')
end_state.update_interaction_id('EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(self.albert_id, exploration1)
exploration2 = exp_domain.Exploration.create_default_exploration(
self.NEW_EXP_ID, title='title', category='category')
exploration2.add_states(['End'])
intro_state = exploration2.states['Introduction']
end_state = exploration2.states['End']
intro_state.update_interaction_id('ItemSelectionInput')
end_state.update_interaction_id('EndExploration')
end_state.update_interaction_default_outcome(None)
exp_services.save_new_exploration(self.albert_id, exploration2)
# Start InteractionAuditOneOff job on sample explorations.
job_id = exp_jobs_one_off.InteractionAuditOneOffJob.create_new()
exp_jobs_one_off.InteractionAuditOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.InteractionAuditOneOffJob.get_output(
job_id))
actual_output_dict = {}
for item in [ast.literal_eval(value) for value in actual_output]:
actual_output_dict[item[0]] = set(item[1])
expected_output_dict = {
'EndExploration': set(['exp_id0 End', 'exp_id1 End']),
'ItemSelectionInput': set(['exp_id1 Introduction']),
'TextInput': set(['exp_id0 Introduction'])
}
self.assertEqual(actual_output_dict, expected_output_dict)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
exp_services.delete_exploration(self.albert_id, self.VALID_EXP_ID)
run_job_for_deleted_exp(
self, exp_jobs_one_off.InteractionAuditOneOffJob)
class ItemSelectionInteractionOneOffJobTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(ItemSelectionInteractionOneOffJobTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_exp_state_pairs_are_produced_only_for_desired_interactions(self):
"""Checks (exp, state) pairs are produced only for
desired interactions.
"""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1', 'State2'])
state1 = exploration.states['State1']
state2 = exploration.states['State2']
state1.update_interaction_id('ItemSelectionInput')
state2.update_interaction_id('ItemSelectionInput')
customization_args_dict1 = {
'choices': {'value': [
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
]}
}
answer_group_list1 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value2 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'Introduction',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
state1.update_interaction_customization_args(customization_args_dict1)
state1.update_interaction_answer_groups(answer_group_list1)
exp_services.save_new_exploration(self.albert_id, exploration)
# Start ItemSelectionInteractionOneOff job on sample exploration.
job_id = exp_jobs_one_off.ItemSelectionInteractionOneOffJob.create_new()
exp_jobs_one_off.ItemSelectionInteractionOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ItemSelectionInteractionOneOffJob.get_output(
job_id))
self.assertEqual(actual_output, [])
customization_args_dict2 = {
'choices': {'value': [
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
]}
}
answer_group_list2 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value3 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'State1',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
state2.update_interaction_customization_args(customization_args_dict2)
state2.update_interaction_answer_groups(answer_group_list2)
exp_services.save_new_exploration(self.albert_id, exploration)
# Start ItemSelectionInteractionOneOff job on sample exploration.
job_id = exp_jobs_one_off.ItemSelectionInteractionOneOffJob.create_new()
exp_jobs_one_off.ItemSelectionInteractionOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ItemSelectionInteractionOneOffJob.get_output(
job_id))
expected_output = [(
u'[u\'exp_id0\', '
u'[u\'State2: <p>This is value3 for ItemSelection</p>\']]'
)]
self.assertEqual(actual_output, expected_output)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1'])
state1 = exploration.states['State1']
state1.update_interaction_id('ItemSelectionInput')
customization_args_dict = {
'choices': {'value': [
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
]}
}
answer_group_list = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelection</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value3 for ItemSelection</p>'
]}
}],
'outcome': {
'dest': 'State1',
'feedback': {
'content_id': 'feedback',
'html': '<p>Outcome for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
state1.update_interaction_customization_args(customization_args_dict)
state1.update_interaction_answer_groups(answer_group_list)
exp_services.save_new_exploration(self.albert_id, exploration)
exp_services.delete_exploration(self.albert_id, self.VALID_EXP_ID)
run_job_for_deleted_exp(
self, exp_jobs_one_off.ItemSelectionInteractionOneOffJob)
class ViewableExplorationsAuditJobTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(ViewableExplorationsAuditJobTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_output_contains_only_viewable_private_explorations(self):
"""Checks that only viewable private explorations are present
in output.
"""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
# Start ViewableExplorationsAudit job on sample exploration.
job_id = exp_jobs_one_off.ViewableExplorationsAuditJob.create_new()
exp_jobs_one_off.ViewableExplorationsAuditJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ViewableExplorationsAuditJob.get_output(
job_id))
self.assertEqual(actual_output, [])
self.set_admins([self.ALBERT_NAME])
owner = user_services.UserActionsInfo(self.albert_id)
rights_manager.set_private_viewability_of_exploration(
owner, self.VALID_EXP_ID, True)
# Start ViewableExplorationsAudit job on sample exploration.
job_id = exp_jobs_one_off.ViewableExplorationsAuditJob.create_new()
exp_jobs_one_off.ViewableExplorationsAuditJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ViewableExplorationsAuditJob.get_output(
job_id))
expected_output = ['[u\'exp_id0\', [u\'title\']]']
self.assertEqual(actual_output, expected_output)
rights_manager.publish_exploration(owner, self.VALID_EXP_ID)
# Start ViewableExplorationsAudit job on sample exploration.
job_id = exp_jobs_one_off.ViewableExplorationsAuditJob.create_new()
exp_jobs_one_off.ViewableExplorationsAuditJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ViewableExplorationsAuditJob.get_output(
job_id))
self.assertEqual(actual_output, [])
def test_no_action_is_performed_when_exploration_rights_is_none(self):
"""Test that no action is performed when exploration rights is none."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
self.set_admins([self.ALBERT_NAME])
owner = user_services.UserActionsInfo(self.albert_id)
rights_manager.set_private_viewability_of_exploration(
owner, self.VALID_EXP_ID, True)
exp_rights_model = exp_models.ExplorationRightsModel.get(
self.VALID_EXP_ID)
exp_rights_model.delete(feconf.SYSTEM_COMMITTER_ID, 'Delete model')
# Start ViewableExplorationsAudit job on sample exploration.
job_id = exp_jobs_one_off.ViewableExplorationsAuditJob.create_new()
exp_jobs_one_off.ViewableExplorationsAuditJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.ViewableExplorationsAuditJob.get_output(
job_id))
self.assertEqual(actual_output, [])
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
self.set_admins([self.ALBERT_NAME])
owner = user_services.UserActionsInfo(self.albert_id)
rights_manager.set_private_viewability_of_exploration(
owner, self.VALID_EXP_ID, True)
exp_services.delete_exploration(self.albert_id, self.VALID_EXP_ID)
run_job_for_deleted_exp(
self, exp_jobs_one_off.ViewableExplorationsAuditJob)
class HintsAuditOneOffJobTests(test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(HintsAuditOneOffJobTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_number_of_hints_tabulated_are_correct_in_single_exp(self):
"""Checks that correct number of hints are tabulated when
there is single exploration.
"""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1', 'State2', 'State3'])
state1 = exploration.states['State1']
state2 = exploration.states['State2']
hint_list1 = [{
'hint_content': {
'content_id': 'hint1',
'html': '<p>Hello, this is html1 for state1</p>'
}
}, {
'hint_content': {
'content_id': 'hint2',
'html': '<p>Hello, this is html2 for state1</p>'
}
}]
hint_list2 = [{
'hint_content': {
'content_id': 'hint1',
'html': '<p>Hello, this is html1 for state2</p>'
}
}]
state1.update_interaction_hints(hint_list1)
state2.update_interaction_hints(hint_list2)
exp_services.save_new_exploration(self.albert_id, exploration)
# Start HintsAuditOneOff job on sample exploration.
job_id = exp_jobs_one_off.HintsAuditOneOffJob.create_new()
exp_jobs_one_off.HintsAuditOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = exp_jobs_one_off.HintsAuditOneOffJob.get_output(job_id)
expected_output = [
'[u\'1\', [u\'exp_id0 State2\']]',
'[u\'2\', [u\'exp_id0 State1\']]'
]
self.assertEqual(actual_output, expected_output)
def test_number_of_hints_tabulated_are_correct_in_multiple_exps(self):
"""Checks that correct number of hints are tabulated when
there are multiple explorations.
"""
exploration1 = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration1.add_states(['State1', 'State2', 'State3'])
state1 = exploration1.states['State1']
state2 = exploration1.states['State2']
hint_list1 = [{
'hint_content': {
'content_id': 'hint1',
'html': '<p>Hello, this is html1 for state1</p>'
}
}, {
'hint_content': {
'content_id': 'hint2',
'html': '<p>Hello, this is html2 for state1</p>'
}
}]
hint_list2 = [{
'hint_content': {
'content_id': 'hint1',
'html': '<p>Hello, this is html1 for state2</p>'
}
}]
state1.update_interaction_hints(hint_list1)
state2.update_interaction_hints(hint_list2)
exp_services.save_new_exploration(self.albert_id, exploration1)
exploration2 = exp_domain.Exploration.create_default_exploration(
self.NEW_EXP_ID, title='title', category='category')
exploration2.add_states(['State1', 'State2'])
state1 = exploration2.states['State1']
hint_list1 = [{
'hint_content': {
'content_id': 'hint1',
'html': '<p>Hello, this is html1 for state1</p>'
}
}]
state1.update_interaction_hints(hint_list1)
exp_services.save_new_exploration(self.albert_id, exploration2)
# Start HintsAuditOneOff job on sample exploration.
job_id = exp_jobs_one_off.HintsAuditOneOffJob.create_new()
exp_jobs_one_off.HintsAuditOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = exp_jobs_one_off.HintsAuditOneOffJob.get_output(job_id)
actual_output_dict = {}
for item in [ast.literal_eval(value) for value in actual_output]:
actual_output_dict[item[0]] = set(item[1])
expected_output_dict = {
'1': set(['exp_id0 State2', 'exp_id1 State1']),
'2': set(['exp_id0 State1'])
}
self.assertEqual(actual_output_dict, expected_output_dict)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1'])
state1 = exploration.states['State1']
hint_list = [{
'hint_content': {
'content_id': 'hint1',
'html': '<p>Hello, this is html1 for state1</p>'
}
}, {
'hint_content': {
'content_id': 'hint2',
'html': '<p>Hello, this is html2 for state1</p>'
}
}]
state1.update_interaction_hints(hint_list)
exp_services.save_new_exploration(self.albert_id, exploration)
exp_services.delete_exploration(self.albert_id, self.VALID_EXP_ID)
run_job_for_deleted_exp(self, exp_jobs_one_off.HintsAuditOneOffJob)
class ExplorationContentValidationJobForCKEditorTests(
test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(ExplorationContentValidationJobForCKEditorTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_for_validation_job(self):
"""Tests that the exploration validation job validates the content
without skipping any tags.
"""
# This mock should only be used for
# ExplorationContentValidationJobForCKEditor.
# The job finds invalid strings in an exploration.
# If we do not use the mock, some of the strings will be converted
# to a valid format during initialization of subtitled html
# in state.
def mock_convert_to_ckeditor(html_data):
return html_data
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1', 'State2', 'State3'])
state1 = exploration.states['State1']
state2 = exploration.states['State2']
state3 = exploration.states['State3']
content1_dict = {
'content_id': 'content',
'html': (
'<p>Lorem ipsum </p><p> Hello this is oppia </p>'
)
}
state1.update_content(content1_dict)
exp_services.save_new_exploration(self.albert_id, exploration)
# Start validation job on sample exploration.
job_id = (
exp_jobs_one_off
.ExplorationContentValidationJobForCKEditor.create_new())
exp_jobs_one_off.ExplorationContentValidationJobForCKEditor.enqueue(
job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off
.ExplorationContentValidationJobForCKEditor.get_output(job_id))
expected_output = []
self.assertEqual(actual_output, expected_output)
content1_dict = {
'content_id': 'content',
'html': (
'<p>Lorem <span>ipsum </span></p> Hello this is '
'<code>oppia </code>'
)
}
content2_dict = {
'content_id': 'content',
'html': (
'<p><oppia-noninteractive-image filepath-with-value="amp;quot;'
'random.png&quot;"></oppia-noninteractive-image>Hello this '
'is test case to check image tag inside p tag</p>'
)
}
content3_dict = {
'content_id': 'content',
'html': (
'<oppia-noninteractive-collapsible content-with-value="&'
'quot;&lt;pre&gt;&lt;p&gt;lorem ipsum&'
'amp;lt;/p&gt;&lt;/pre&gt;'
'&quot;" heading-with-value="&quot;'
'lorem ipsum&quot;lorem ipsum&quot;?&quot;">'
'</oppia-noninteractive-collapsible>'
)
}
default_outcome_dict1 = {
'dest': 'State2',
'feedback': {
'content_id': 'default_outcome',
'html': (
'<ol><ol><li>Item1</li></ol><li>Item2</li></ol>'
)
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
}
default_outcome_dict2 = {
'dest': 'State1',
'feedback': {
'content_id': 'default_outcome',
'html': (
'<pre>Hello this is <b> testing '
'<oppia-noninteractive-image filepath-with-value="amp;quot;'
'random.png&quot;"></oppia-noninteractive-image> in '
'</b>progress</pre>'
)
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
}
mock_convert_to_ckeditor_context = self.swap(
html_validation_service, 'convert_to_ckeditor',
mock_convert_to_ckeditor)
mock_validate_context = self.swap(
state_domain.SubtitledHtml, 'validate', mock_validate)
with mock_validate_context, mock_convert_to_ckeditor_context:
state1.update_content(content1_dict)
state2.update_content(content2_dict)
state3.update_content(content3_dict)
state1.update_interaction_default_outcome(default_outcome_dict1)
state2.update_interaction_default_outcome(default_outcome_dict2)
exp_services.save_new_exploration(self.albert_id, exploration)
job_id = (
exp_jobs_one_off
.ExplorationContentValidationJobForCKEditor.create_new())
exp_jobs_one_off.ExplorationContentValidationJobForCKEditor.enqueue(
job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off
.ExplorationContentValidationJobForCKEditor.get_output(job_id))
expected_output = [
'[u\'invalidTags\', [u\'span\', u\'code\', u\'b\', '
'u\'Exp Id: exp_id0\']]',
'[u\'ol\', [u\'ol\', u\'Exp Id: exp_id0\']]',
'[u\'oppia-noninteractive-image\', [u\'p\', u\'b\', '
'u\'Exp Id: exp_id0\']]',
'[u\'p\', [u\'pre\', u\'Exp Id: exp_id0\']]',
(
'[u\'strings\', '
'[u\'<p>Lorem <span>ipsum </span></p> Hello this is <code>'
'oppia </code>\', u\'<pre>Hello this is <b> testing <oppia-'
'noninteractive-image filepath-with-value="amp;quot;random.'
'png&quot;"></oppia-noninteractive-image>'
' in </b>progress</pre>\', '
'u\'<ol><ol><li>Item1</li></ol><li>Item2</li></ol>\', '
'u\'<p><oppia-noninteractive-image filepath-with-value="'
'amp;quot;random.png&quot;"></oppia-noninteractive-image>'
'Hello this is test case to check '
'image tag inside p tag</p>\', '
'u\'<oppia-noninteractive-collapsible content-'
'with-value="&quot;&lt;pre&gt;&lt;'
'p&gt;lorem ipsum&lt;/p&gt;&lt;/pre&'
'gt;&quot;" heading-with-value="&quot;lorem '
'ipsum&quot;lorem ipsum&quot;?&quot;">'
'</oppia-noninteractive-collapsible>\', u\'Exp Id: exp_id0\']]'
)
]
self.assertEqual(actual_output, expected_output)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1'])
content_dict = {
'html': '<code>Hello</code>',
'content_id': 'content'
}
state1 = exploration.states['State1']
with self.swap(
state_domain.SubtitledHtml, 'validate', mock_validate):
state1.update_content(content_dict)
exp_services.save_new_exploration(self.albert_id, exploration)
exp_services.delete_exploration(self.albert_id, self.VALID_EXP_ID)
run_job_for_deleted_exp(
self,
exp_jobs_one_off.ExplorationContentValidationJobForCKEditor)
def test_validation_job_fails_for_invalid_schema_version(self):
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID)
exploration_model.states_schema_version = 100
exploration_model.commit(
self.albert_id, 'Changed states_schema_version.', [])
memcache_services.delete('exploration:%s' % self.VALID_EXP_ID)
job_id = (
exp_jobs_one_off
.ExplorationContentValidationJobForCKEditor.create_new())
exp_jobs_one_off.ExplorationContentValidationJobForCKEditor.enqueue(
job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off
.ExplorationContentValidationJobForCKEditor.get_output(job_id))
expected_output = [
u'[u\'Error Sorry, we can only process v1-v%s and unversioned '
'exploration state schemas at present. when loading exploration\', '
'[u\'exp_id0\']]' % feconf.CURRENT_STATE_SCHEMA_VERSION]
self.assertEqual(actual_output, expected_output)
class InteractionCustomizationArgsValidationJobTests(
test_utils.GenericTestBase):
ALBERT_EMAIL = '[email protected]'
ALBERT_NAME = 'albert'
VALID_EXP_ID = 'exp_id0'
NEW_EXP_ID = 'exp_id1'
EXP_TITLE = 'title'
def setUp(self):
super(
InteractionCustomizationArgsValidationJobTests, self).setUp()
# Setup user who will own the test explorations.
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.process_and_flush_pending_tasks()
def test_for_customization_arg_validation_job(self):
"""Validates customization args for rich text components."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1', 'State2', 'State3'])
state1 = exploration.states['State1']
state2 = exploration.states['State2']
state3 = exploration.states['State3']
content1_dict = {
'content_id': 'content',
'html': (
'<oppia-noninteractive-tabs tab_contents-with-value="'
'[{&quot;content&quot;: &quot;&lt;p&'
'gt;lorem ipsum&lt;/p&gt;&quot;, &quot;'
'title&quot;: &quot;hello&quot;}, {&'
'quot;content&quot;: &quot;&lt;p&gt;'
'oppia&lt;/p&gt;&quot;, &'
'quot;title&quot;: &quot;Savjet 1&quot;}]">'
'</oppia-noninteractive-tabs>'
)
}
default_outcome_dict2 = {
'dest': 'State1',
'feedback': {
'content_id': 'default_outcome',
'html': (
'<p><oppia-noninteractive-link text-with-value="'
'&quot;What is a link?&quot;" url-with-'
'value="&quot;htt://link.com&'
';quot;"></oppia-noninteractive-link></p>'
)
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
}
content3_dict = {
'content_id': 'content',
'html': (
'<oppia-noninteractive-image alt-with-value="&quot;A '
'circle divided into equal fifths.&quot;" '
'caption-with-value="&quot;Hello&quot;" '
'filepath-with-value="&quot;xy.z.png&quot;">'
'</oppia-noninteractive-image>'
)
}
with self.swap(state_domain.SubtitledHtml, 'validate', mock_validate):
state1.update_content(content1_dict)
state2.update_interaction_default_outcome(default_outcome_dict2)
state3.update_content(content3_dict)
exp_services.save_new_exploration(self.albert_id, exploration)
# Start CustomizationArgsValidation job on sample exploration.
job_id = (
exp_jobs_one_off
.InteractionCustomizationArgsValidationJob.create_new())
exp_jobs_one_off.InteractionCustomizationArgsValidationJob.enqueue(
job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off
.InteractionCustomizationArgsValidationJob.get_output(job_id))
expected_output = [(
'[u"Invalid URL: Sanitized URL should start with \'http://\' or \''
'https://\'; received htt://link.com", '
'[u\'<p><oppia-noninteractive-link text-with-value="&quot;What '
'is a link?&quot;" url-with-value="&quot;htt://link.com'
'&quot;"></oppia-noninteractive-link></p>\', '
'u\'Exp Id: exp_id0\']]'
), (
'[u\'Invalid filepath\', '
'[u\'<oppia-noninteractive-image alt-with-value="&quot;A '
'circle divided into equal fifths.&quot;" caption-with-value'
'="&quot;Hello&quot;" filepath-with-value="&quot;xy.z.'
'png&quot;"></oppia-noninteractive-image>\', '
'u\'Exp Id: exp_id0\']]'
)]
self.assertEqual(actual_output, expected_output)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Test that no action is performed on deleted explorations."""
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exploration.add_states(['State1'])
content_dict = {
'html': (
'<p><oppia-noninteractive-link text-with-value="'
'&quot;What is a link?&quot;" url-with-'
'value="&quot;htt://link.com&'
';quot;"></oppia-noninteractive-link></p>'
),
'content_id': 'content'
}
state1 = exploration.states['State1']
with self.swap(state_domain.SubtitledHtml, 'validate', mock_validate):
state1.update_content(content_dict)
exp_services.save_new_exploration(self.albert_id, exploration)
exp_services.delete_exploration(self.albert_id, self.VALID_EXP_ID)
run_job_for_deleted_exp(
self,
exp_jobs_one_off.InteractionCustomizationArgsValidationJob)
def test_validation_job_fails_for_invalid_schema_version(self):
exploration = exp_domain.Exploration.create_default_exploration(
self.VALID_EXP_ID, title='title', category='category')
exp_services.save_new_exploration(self.albert_id, exploration)
exploration_model = exp_models.ExplorationModel.get(self.VALID_EXP_ID)
exploration_model.states_schema_version = 100
exploration_model.commit(
self.albert_id, 'Changed states_schema_version.', [])
memcache_services.delete('exploration:%s' % self.VALID_EXP_ID)
job_id = (
exp_jobs_one_off
.InteractionCustomizationArgsValidationJob.create_new())
exp_jobs_one_off.InteractionCustomizationArgsValidationJob.enqueue(
job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off
.InteractionCustomizationArgsValidationJob.get_output(job_id))
expected_output = [
u'[u\'Error Sorry, we can only process v1-v%s and unversioned '
'exploration state schemas at present. when loading exploration\', '
'[u\'exp_id0\']]' % feconf.CURRENT_STATE_SCHEMA_VERSION]
self.assertEqual(actual_output, expected_output)
class TranslatorToVoiceArtistOneOffJobTests(test_utils.GenericTestBase):
ONE_OFF_JOB_MANAGERS_FOR_TESTS = [
exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob]
EXP_ID = 'exp_id'
USERNAME_A = 'usernamea'
USERNAME_B = 'usernameb'
EMAIL_A = '[email protected]'
EMAIL_B = '[email protected]'
def setUp(self):
super(TranslatorToVoiceArtistOneOffJobTests, self).setUp()
self.signup(self.EMAIL_A, self.USERNAME_A)
self.signup(self.EMAIL_B, self.USERNAME_B)
self.user_a_id = self.get_user_id_from_email(self.EMAIL_A)
self.user_b_id = self.get_user_id_from_email(self.EMAIL_B)
def test_action_is_performed_when_translator_ids_exists(self):
"""Test translator_ids are migrated to voice_artist_ids successfully."""
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title 1')
rights_manager.create_new_exploration_rights(
exploration.id, self.user_a_id)
exp_rights_model = exp_models.ExplorationRightsModel.get(
exploration.id)
exp_rights_model.translator_ids = [self.user_a_id, self.user_b_id]
commit_message = 'Assign a translator for test'
commit_cmds = [{
'cmd': 'change_role',
'assignee_id': self.user_a_id,
'new_role': 'translator'
}, {
'cmd': 'change_role',
'assignee_id': self.user_b_id,
'new_role': 'translator'
}]
exp_rights_model.commit(self.user_a_id, commit_message, commit_cmds)
exp_summary_model = exp_models.ExpSummaryModel(
id=exploration.id,
title='title',
category='category',
objective='Old objective',
language_code='en',
community_owned=exp_rights_model.community_owned,
translator_ids=[self.user_a_id, self.user_b_id]
)
exp_summary_model.put()
job_id = (
exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob.create_new())
exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob.get_output(
job_id)
)
expected_output = ['[u\'SUCCESS\', 1]']
self.assertEqual(actual_output, expected_output)
exp_rights_model_2 = exp_models.ExplorationRightsModel.get(
exploration.id)
self.assertEqual([], exp_rights_model_2.translator_ids)
self.assertEqual(
[self.user_a_id, self.user_b_id],
exp_rights_model_2.voice_artist_ids
)
exp_summary_model_2 = exp_models.ExpSummaryModel.get(exploration.id)
self.assertEqual([], exp_summary_model_2.translator_ids)
self.assertEqual(
[self.user_a_id, self.user_b_id],
exp_summary_model_2.voice_artist_ids
)
def test_partial_job_is_performed_for_deleted_exploration_summary(self):
"""Tests that when ExplorationRightsModel exists but ExpSummaryModel
does not exist or is deleted, action is only performed for rights model,
and returns the id of corresponding exploration.
"""
exploration = self.save_new_valid_exploration(
self.EXP_ID, self.user_a_id, title='Exploration Title 1')
rights_manager.create_new_exploration_rights(
exploration.id, self.user_a_id)
exp_rights_model = exp_models.ExplorationRightsModel.get(
exploration.id)
exp_rights_model.translator_ids = [self.user_b_id]
commit_message = 'Assign a translator for test'
commit_cmds = [{
'cmd': 'change_role',
'assignee_ids': self.user_b_id,
'new_role': 'translator'
}]
exp_rights_model.commit(self.user_a_id, commit_message, commit_cmds)
exp_summary_model = exp_models.ExpSummaryModel(
id=exploration.id,
title='title',
category='category',
objective='Old objective',
language_code='en',
community_owned=exp_rights_model.community_owned,
translator_ids=[self.user_b_id]
)
exp_summary_model.put()
exp_services.delete_exploration_summary(exploration.id)
job_id = (
exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob.create_new())
exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob.enqueue(job_id)
self.process_and_flush_pending_tasks()
actual_output = (
exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob.get_output(
job_id)
)
expected_output = ['[u\'Summary model does not exist or is '
'deleted\', [u\'exp_id\']]']
self.assertEqual(actual_output, expected_output)
exp_rights_model_2 = exp_models.ExplorationRightsModel.get(
exploration.id)
self.assertEqual([], exp_rights_model_2.translator_ids)
self.assertEqual([self.user_b_id], exp_rights_model_2.voice_artist_ids)
def test_no_action_is_performed_for_deleted_exploration(self):
"""Tests that no action is performed when an exploration is deleted."""
exp_id = '100'
self.save_new_valid_exploration(exp_id, self.user_a_id)
exp_services.delete_exploration(self.user_a_id, exp_id)
run_job_for_deleted_exp(
self, exp_jobs_one_off.TranslatorToVoiceArtistOneOffJob)
class DeleteStateIdMappingModelsOneOffJobTests(test_utils.GenericTestBase):
"""Tests the state ID mapping deletion job."""
def test_job_deletes_all_instances_of_model(self):
exp_models.StateIdMappingModel.create(
'exp_id', 1, {'state_1': 1}, 1)
exp_models.StateIdMappingModel.create(
'exp_id', 2, {'state_1': 1}, 1)
exp_models.StateIdMappingModel.create(
'exp_id', 3, {'state_1': 1}, 1)
self.assertIsNotNone(
exp_models.StateIdMappingModel.get_state_id_mapping_model(
'exp_id', 1))
self.assertIsNotNone(
exp_models.StateIdMappingModel.get_state_id_mapping_model(
'exp_id', 2))
self.assertIsNotNone(
exp_models.StateIdMappingModel.get_state_id_mapping_model(
'exp_id', 3))
self.assertEqual(self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
job_id = (
exp_jobs_one_off.DeleteStateIdMappingModelsOneOffJob.create_new())
exp_jobs_one_off.DeleteStateIdMappingModelsOneOffJob.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 0)
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError,
'Entity for class StateIdMappingModel with id exp_id.1 not found'):
exp_models.StateIdMappingModel.get_state_id_mapping_model(
'exp_id', 1)
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError,
'Entity for class StateIdMappingModel with id exp_id.2 not found'):
exp_models.StateIdMappingModel.get_state_id_mapping_model(
'exp_id', 2)
with self.assertRaisesRegexp(
base_models.BaseModel.EntityNotFoundError,
'Entity for class StateIdMappingModel with id exp_id.3 not found'):
exp_models.StateIdMappingModel.get_state_id_mapping_model(
'exp_id', 3)
| apache-2.0 | -433,900,859,914,173,500 | 39.780531 | 135 | 0.613851 | false |
sunlightlabs/tcamp | tcamp/sked/migrations/0012_auto__add_field_location_has_sessions.py | 1 | 9931 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Location.has_sessions'
db.add_column(u'sked_location', 'has_sessions',
self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Location.has_sessions'
db.delete_column(u'sked_location', 'has_sessions')
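    # For reference (assumed, matching the frozen 'sked.location' definition
    # below): the column added/removed by this migration corresponds to a
    # model field of the form
    #   has_sessions = models.BooleanField(default=True, db_index=True)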
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sked.event': {
'Meta': {'ordering': "('-start_date',)", 'object_name': 'Event'},
'_description_rendered': ('django.db.models.fields.TextField', [], {}),
'_overview_rendered': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sked_events'", 'to': u"orm['auth.User']"}),
'description': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'description_markup_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'event'", 'max_length': '64'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'overview': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'overview_markup_type': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '30', 'blank': 'True'}),
'registration_is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'registration_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'session_label': ('django.db.models.fields.CharField', [], {'default': "'session'", 'max_length': '64'}),
'session_length': ('timedelta.fields.TimedeltaField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'sked.location': {
'Meta': {'ordering': "('-event__start_date', 'name')", 'object_name': 'Location'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'locations'", 'to': u"orm['sked.Event']"}),
'has_sessions': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_official': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'sked.session': {
'Meta': {'ordering': "('-event__start_date', 'start_time')", 'unique_together': "(('event', 'slug'),)", 'object_name': 'Session'},
'_description_rendered': ('django.db.models.fields.TextField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('markupfield.fields.MarkupField', [], {'rendered_field': 'True', 'blank': 'True'}),
'description_markup_type': ('django.db.models.fields.CharField', [], {'default': "'markdown'", 'max_length': '30', 'blank': 'True'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'to': u"orm['sked.Event']"}),
'extra_data': ('jsonfield.fields.JSONField', [], {'default': "'{}'", 'blank': 'True'}),
'has_notes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sessions'", 'null': 'True', 'to': u"orm['sked.Location']"}),
'published_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'approved_sked_sessions'", 'null': 'True', 'to': u"orm['auth.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'speakers': ('jsonfield.fields.JSONField', [], {'default': "'[]'", 'db_index': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['sked'] | bsd-3-clause | -2,078,722,091,345,254,700 | 77.204724 | 187 | 0.551908 | false |
tangowhisky37/RaspiPythonProjects | Write_To_LCD_Screen/RPi_I2C_driver.py | 1 | 4851 | # -*- coding: utf-8 -*-
"""
Compiled, mashed and generally mutilated 2014-2015 by Denis Pleic
Made available under GNU GENERAL PUBLIC LICENSE
# Modified Python I2C library for Raspberry Pi
# as found on http://www.recantha.co.uk/blog/?p=4849
# Joined existing 'i2c_lib.py' and 'lcddriver.py' into a single library
# added bits and pieces from various sources
# By DenisFromHR (Denis Pleic)
# 2015-02-10, ver 0.1
"""
#
#
import smbus
from time import *
class i2c_device:
def __init__(self, addr, port=1):
self.addr = addr
self.bus = smbus.SMBus(port)
# Write a single command
def write_cmd(self, cmd):
self.bus.write_byte(self.addr, cmd)
sleep(0.0001)
# Write a command and argument
def write_cmd_arg(self, cmd, data):
self.bus.write_byte_data(self.addr, cmd, data)
sleep(0.0001)
# Write a block of data
def write_block_data(self, cmd, data):
self.bus.write_block_data(self.addr, cmd, data)
sleep(0.0001)
# Read a single byte
def read(self):
return self.bus.read_byte(self.addr)
   # Read a data byte from the given register/command
def read_data(self, cmd):
return self.bus.read_byte_data(self.addr, cmd)
# Read a block of data
def read_block_data(self, cmd):
return self.bus.read_block_data(self.addr, cmd)
# LCD Address
#ADDRESS = 0x27
ADDRESS = 0x3f
# commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# flags for backlight control
LCD_BACKLIGHT = 0x08
LCD_NOBACKLIGHT = 0x00
En = 0b00000100 # Enable bit
Rw = 0b00000010 # Read/Write bit
Rs = 0b00000001 # Register select bit
class lcd:
#initializes objects and lcd
def __init__(self):
self.lcd_device = i2c_device(ADDRESS)
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x02)
self.lcd_write(LCD_FUNCTIONSET | LCD_2LINE | LCD_5x8DOTS | LCD_4BITMODE)
self.lcd_write(LCD_DISPLAYCONTROL | LCD_DISPLAYON)
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_ENTRYMODESET | LCD_ENTRYLEFT)
sleep(0.2)
# clocks EN to latch command
def lcd_strobe(self, data):
self.lcd_device.write_cmd(data | En | LCD_BACKLIGHT)
sleep(.0005)
self.lcd_device.write_cmd(((data & ~En) | LCD_BACKLIGHT))
sleep(.0001)
def lcd_write_four_bits(self, data):
self.lcd_device.write_cmd(data | LCD_BACKLIGHT)
self.lcd_strobe(data)
# write a command to lcd
def lcd_write(self, cmd, mode=0):
self.lcd_write_four_bits(mode | (cmd & 0xF0))
self.lcd_write_four_bits(mode | ((cmd << 4) & 0xF0))
   # write a character to lcd (or character rom); mode 0x09 = backlight | RS=DR
# works!
def lcd_write_char(self, charvalue, mode=1):
self.lcd_write_four_bits(mode | (charvalue & 0xF0))
self.lcd_write_four_bits(mode | ((charvalue << 4) & 0xF0))
# put string function
def lcd_display_string(self, string, line):
if line == 1:
self.lcd_write(0x80)
if line == 2:
self.lcd_write(0xC0)
if line == 3:
self.lcd_write(0x94)
if line == 4:
self.lcd_write(0xD4)
for char in string:
self.lcd_write(ord(char), Rs)
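   # Illustrative usage sketch (assumes an HD44780-style LCD on a PCF8574 I2C
   # backpack answering at ADDRESS on port 1; only names defined in this
   # module are used):
   #   mylcd = lcd()
   #   mylcd.lcd_display_string("Hello", 1)   # write to line 1
   #   mylcd.lcd_display_string("World", 2)   # write to line 2
   #   mylcd.lcd_clear()                      # clear display, cursor home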
# clear lcd and set to home
def lcd_clear(self):
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_RETURNHOME)
# define backlight on/off (lcd.backlight(1); off= lcd.backlight(0)
def backlight(self, state): # for state, 1 = on, 0 = off
if state == 1:
self.lcd_device.write_cmd(LCD_BACKLIGHT)
elif state == 0:
self.lcd_device.write_cmd(LCD_NOBACKLIGHT)
# add custom characters (0 - 7)
def lcd_load_custom_chars(self, fontdata):
self.lcd_write(0x40);
for char in fontdata:
for line in char:
self.lcd_write_char(line)
# define precise positioning (addition from the forum)
def lcd_display_string_pos(self, string, line, pos):
if line == 1:
pos_new = pos
elif line == 2:
pos_new = 0x40 + pos
elif line == 3:
pos_new = 0x14 + pos
elif line == 4:
pos_new = 0x54 + pos
self.lcd_write(0x80 + pos_new)
for char in string:
self.lcd_write(ord(char), Rs)
| gpl-3.0 | 9,119,408,727,036,929,000 | 24.803191 | 78 | 0.652031 | false |
santoshsahoo/personfinder | app/admin_review.py | 1 | 5813 | #!/usr/bin/python2.5
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.appengine.ext import db
from google.appengine.api import users
import const
import model
import utils
NOTES_PER_PAGE = 50
STATUS_CODES = {
None: 'u',
'': 'u',
'information_sought': 's',
'believed_alive': 'a',
'believed_missing': 'm',
'believed_dead': 'd',
'is_note_author': 'i',
}
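# Example of how these codes are used (a sketch based on the loop in
# Handler.get below): a person whose notes have statuses
# 'information_sought', 'believed_alive' and unspecified is summarised as
# "sau", with the code of the note currently under review uppercased,
# e.g. "sAu" when the second note is the one being reviewed.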
class Handler(utils.BaseHandler):
def get(self):
if not self.is_current_user_authorized():
return self.redirect(users.create_login_url('/admin/review'))
#
# Make the navigation links.
status = self.request.get('status') or 'all'
source = self.request.get('source') or 'all'
status_nav_html = ''
for option in [
'all', 'unspecified', 'information_sought', 'is_note_author',
'believed_alive', 'believed_missing', 'believed_dead']:
if option == status:
status_nav_html += '<b>%s</b> ' % option
else:
status_nav_html += '<a href="%s">%s</a> ' % (
self.get_url('/admin/review', status=option, source=source),
option)
source_nav_html = ''
source_options = ['all', '%s.%s' % (self.repo, const.HOME_DOMAIN)]
for auth_key in model.Authorization.all().filter('repo =', self.repo):
if auth_key.domain_write_permission:
source_options.append(auth_key.domain_write_permission)
for option in source_options:
if option == source:
source_nav_html += '<b>%s</b> ' % option
else:
source_nav_html += '<a href="%s">%s</a> ' % (
self.get_url('/admin/review', status=status, source=option),
option)
#
# Construct the query for notes.
query = model.Note.all_in_repo(self.repo
).filter('reviewed =', False
).filter('hidden =', False)
if status == 'unspecified':
query.filter('status =', '')
elif status != 'all':
query.filter('status =', status)
if source != 'all':
query.filter('person_record_id >=', '%s/' % source)
query.filter('person_record_id <', '%s0' % source)
            # TODO(ryok): we really want to order by entry_date, but a GAE
            # restriction applies here, and we cannot use two different
            # properties for comparison and ordering. The proper solution seems
            # to be to add a source_domain property to Note.
query.order('-person_record_id')
else:
query.order('-entry_date')
skip = self.params.skip or 0
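        # Fetch one extra note beyond the page size so we can tell whether a
        # "next page" link is needed (see the NOTES_PER_PAGE check below).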
notes = query.fetch(NOTES_PER_PAGE + 1, skip)
for note in notes[:NOTES_PER_PAGE]:
person = model.Person.get(self.repo, note.person_record_id)
if person:
# Copy in the fields of the associated Person.
for name in person.properties():
setattr(note, 'person_' + name, getattr(person, name))
# Get the statuses of the other notes on this Person.
status_codes = ''
for other_note in person.get_notes():
code = STATUS_CODES[other_note.status]
if other_note.note_record_id == note.note_record_id:
code = code.upper()
status_codes += code
note.person_status_codes = status_codes
if len(notes) > NOTES_PER_PAGE:
notes = notes[:NOTES_PER_PAGE]
next_skip = skip + NOTES_PER_PAGE
next_url = self.get_url(
'/admin/review', skip=str(next_skip),
status=status, source=source)
else:
next_url = None
return self.render(
'admin_review.html',
notes=notes,
status_nav_html=status_nav_html,
source_nav_html=source_nav_html,
next_url=next_url,
first=skip + 1,
last=skip + len(notes[:NOTES_PER_PAGE]))
def post(self):
if not self.is_current_user_authorized():
return self.redirect(users.create_login_url('/admin/review'))
notes = []
for name, value in self.request.params.items():
if name.startswith('note.'):
note = model.Note.get(self.repo, name[5:])
if note:
if value in ['accept', 'flag']:
note.reviewed = True
if value == 'flag':
note.hidden = True
notes.append(note)
db.put(notes)
self.redirect('/admin/review',
status=self.params.status,
source=self.params.source)
def is_current_user_authorized(self):
if users.is_current_user_admin(): # admins can always review
return True
domain = self.config.authorized_reviewer_domain
if domain: # also allow any user from the configured domain
user = users.get_current_user()
return user and user.email().endswith('@' + domain)
| apache-2.0 | 6,470,416,251,959,256,000 | 37.243421 | 80 | 0.54963 | false |
arangodb/arangodb | 3rdParty/rocksdb/6.8/tools/advisor/test/test_db_log_parser.py | 14 | 4605 | # Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
from advisor.db_log_parser import DatabaseLogs, Log, NO_COL_FAMILY
from advisor.rule_parser import Condition, LogCondition
import os
import unittest
class TestLog(unittest.TestCase):
def setUp(self):
self.column_families = ['default', 'col_fam_A']
def test_get_column_family(self):
test_log = (
"2018/05/25-14:34:21.047233 7f82ba72e700 [db/flush_job.cc:371] " +
"[col_fam_A] [JOB 44] Level-0 flush table #84: 1890780 bytes OK"
)
db_log = Log(test_log, self.column_families)
self.assertEqual('col_fam_A', db_log.get_column_family())
test_log = (
"2018/05/25-14:34:21.047233 7f82ba72e700 [db/flush_job.cc:371] " +
"[JOB 44] Level-0 flush table #84: 1890780 bytes OK"
)
db_log = Log(test_log, self.column_families)
db_log.append_message('[default] some remaining part of log')
self.assertEqual(NO_COL_FAMILY, db_log.get_column_family())
def test_get_methods(self):
hr_time = "2018/05/25-14:30:25.491635"
context = "7f82ba72e700"
message = (
"[db/flush_job.cc:331] [default] [JOB 10] Level-0 flush table " +
"#23: started"
)
test_log = hr_time + " " + context + " " + message
db_log = Log(test_log, self.column_families)
self.assertEqual(db_log.get_message(), message)
remaining_message = "[col_fam_A] some more logs"
db_log.append_message(remaining_message)
self.assertEqual(
db_log.get_human_readable_time(), "2018/05/25-14:30:25.491635"
)
self.assertEqual(db_log.get_context(), "7f82ba72e700")
self.assertEqual(db_log.get_timestamp(), 1527258625)
self.assertEqual(
db_log.get_message(), str(message + '\n' + remaining_message)
)
def test_is_new_log(self):
new_log = "2018/05/25-14:34:21.047233 context random new log"
remaining_log = "2018/05/25 not really a new log"
self.assertTrue(Log.is_new_log(new_log))
self.assertFalse(Log.is_new_log(remaining_log))
class TestDatabaseLogs(unittest.TestCase):
def test_check_and_trigger_conditions(self):
this_path = os.path.abspath(os.path.dirname(__file__))
logs_path_prefix = os.path.join(this_path, 'input_files/LOG-0')
column_families = ['default', 'col-fam-A', 'col-fam-B']
db_logs = DatabaseLogs(logs_path_prefix, column_families)
# matches, has 2 col_fams
condition1 = LogCondition.create(Condition('cond-A'))
condition1.set_parameter('regex', 'random log message')
# matches, multiple lines message
condition2 = LogCondition.create(Condition('cond-B'))
condition2.set_parameter('regex', 'continuing on next line')
# does not match
condition3 = LogCondition.create(Condition('cond-C'))
condition3.set_parameter('regex', 'this should match no log')
db_logs.check_and_trigger_conditions(
[condition1, condition2, condition3]
)
cond1_trigger = condition1.get_trigger()
self.assertEqual(2, len(cond1_trigger.keys()))
self.assertSetEqual(
{'col-fam-A', NO_COL_FAMILY}, set(cond1_trigger.keys())
)
self.assertEqual(2, len(cond1_trigger['col-fam-A']))
messages = [
"[db/db_impl.cc:563] [col-fam-A] random log message for testing",
"[db/db_impl.cc:653] [col-fam-A] another random log message"
]
self.assertIn(cond1_trigger['col-fam-A'][0].get_message(), messages)
self.assertIn(cond1_trigger['col-fam-A'][1].get_message(), messages)
self.assertEqual(1, len(cond1_trigger[NO_COL_FAMILY]))
self.assertEqual(
cond1_trigger[NO_COL_FAMILY][0].get_message(),
"[db/db_impl.cc:331] [unknown] random log message no column family"
)
cond2_trigger = condition2.get_trigger()
self.assertEqual(['col-fam-B'], list(cond2_trigger.keys()))
self.assertEqual(1, len(cond2_trigger['col-fam-B']))
self.assertEqual(
cond2_trigger['col-fam-B'][0].get_message(),
"[db/db_impl.cc:234] [col-fam-B] log continuing on next line\n" +
"remaining part of the log"
)
self.assertIsNone(condition3.get_trigger())
| apache-2.0 | 3,976,545,626,828,295,000 | 43.708738 | 79 | 0.616721 | false |
project-zerus/blade | src/blade/sh_test_target.py | 1 | 3937 | # Copyright (c) 2016 Tencent Inc.
# All rights reserved.
#
# Author: Li Wenting <[email protected]>
# Date: June 2, 2016
"""
This module defines sh_test target which executes a shell script.
"""
import os
import blade
import build_rules
import console
from blade_util import var_to_list
from blade_util import location_re
from target import Target
class ShellTest(Target):
"""ShellTest is derived from Target and used to execute a shell script.
    Normally, by use of testdata you can establish the test environment,
    with all the necessary data and files placed in the runfiles directory,
    and then refer to those files within the shell script directly.
    In addition to the regular files, the user is able to reference
    the output of another target in testdata using the location reference
    syntax.
"""
def __init__(self,
name,
srcs,
deps,
testdata,
kwargs):
srcs = var_to_list(srcs)
deps = var_to_list(deps)
testdata = var_to_list(testdata)
Target.__init__(self,
name,
'sh_test',
srcs,
deps,
None,
blade.blade,
kwargs)
self._process_test_data(testdata)
def _process_test_data(self, testdata):
"""
        Process test data whose source could be either a regular file
        or a location reference.
"""
self.data['testdata'], self.data['locations'] = [], []
for td in testdata:
if isinstance(td, tuple):
src, dst = td
elif isinstance(td, str):
src, dst = td, ''
else:
console.error_exit('%s: Invalid testdata %s. Test data should '
'be either str or tuple.' % (self.fullname, td))
m = location_re.search(src)
if m:
key, type = self._add_location_reference_target(m)
self.data['locations'].append((key, type, dst))
else:
self.data['testdata'].append(td)
def _generate_test_data_rules(self):
env_name = self._env_name()
var_name = self._var_name('testdata')
targets = self.blade.get_build_targets()
sources = []
for key, type, dst in self.data['locations']:
target = targets[key]
target_var = target._get_target_var(type)
if not target_var:
console.warning('%s: Location %s %s is missing. Ignored.' %
(self.fullname, key, type))
else:
sources.append('%s, %s.Value("%s")' % (target_var, env_name, dst))
if sources:
self._write_rule('%s = %s.ShellTestData(target = "%s.testdata", '
'source = [%s])' % (
var_name, env_name,
self._target_file_path(),
', '.join(sources)))
def scons_rules(self):
self._clone_env()
env_name = self._env_name()
var_name = self._var_name()
srcs = [self._source_file_path(s) for s in self.srcs]
self._write_rule('%s = %s.ShellTest(target = "%s", source = %s)' % (
var_name, env_name,
self._target_file_path(), srcs))
self._generate_test_data_rules()
def sh_test(name,
srcs,
deps=[],
testdata=[],
**kwargs):
blade.blade.register_target(ShellTest(name,
srcs,
deps,
testdata,
kwargs))
build_rules.register_function(sh_test)
| bsd-3-clause | -8,556,172,725,187,871,000 | 30.75 | 83 | 0.493015 | false |
ximion/Clementine-LibDanceTag | data/pythonlibs/uic/properties.py | 1 | 14787 | import logging
import sys
from uic.exceptions import UnsupportedPropertyError
from uic.icon_cache import IconCache
if sys.hexversion >= 0x03000000:
from uic.port_v3.ascii_upper import ascii_upper
else:
from uic.port_v2.ascii_upper import ascii_upper
logger = logging.getLogger(__name__)
DEBUG = logger.debug
QtCore = None
QtGui = None
def int_list(prop):
return [int(child.text) for child in prop]
def float_list(prop):
return [float(child.text) for child in prop]
bool_ = lambda v: v == "true"
def needsWidget(func):
func.needsWidget = True
return func
class Properties(object):
def __init__(self, factory, QtCore_mod, QtGui_mod):
global QtGui, QtCore
QtGui = QtGui_mod
QtCore = QtCore_mod
self.factory = factory
self.reset()
def reset(self):
self.buddies = []
self.delayed_props = []
self.icon_cache = IconCache(self.factory, QtGui)
def _pyEnumMember(self, cpp_name):
try:
prefix, membername = cpp_name.split("::")
DEBUG(membername)
if prefix == "Qt":
return getattr(QtCore.Qt, membername)
else:
return getattr(getattr(QtGui, prefix), membername)
except ValueError:
pass
try:
return getattr(QtCore.Qt, cpp_name)
except AttributeError:
# There seems to be a bug where this can succeed when it shouldn't.
# If so it will be picked up when the generated code is run.
return getattr(getattr(QtGui, self.wclass), cpp_name)
def _set(self, prop):
expr = [self._pyEnumMember(v) for v in prop.text.split('|')]
value = expr[0]
for v in expr[1:]:
value |= v
return value
def _enum(self, prop):
return self._pyEnumMember(prop.text)
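    # Examples of what the two helpers above resolve to (based directly on
    # the parsing code; QtCore/QtGui are the modules injected in __init__):
    #   _enum on 'Qt::AlignLeft'      -> QtCore.Qt.AlignLeft
    #   _enum on 'QFrame::Sunken'     -> QtGui.QFrame.Sunken
    #   _set  on 'Qt::AlignLeft|Qt::AlignTop'
    #                                 -> QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop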
def _number(self, prop):
return int(prop.text)
_uInt = _longLong = _uLongLong = _number
def _double(self, prop):
return float(prop.text)
def _bool(self, prop):
return prop.text == 'true'
def _stringlist(self, prop):
return [self._string(p, notr='true') for p in prop]
def _string(self, prop, notr=None):
if prop.get('notr', notr) == 'true':
return self._cstring(prop)
if prop.text is None:
return ""
return QtGui.QApplication.translate(self.uiname, prop.text, None,
QtGui.QApplication.UnicodeUTF8)
_char = _string
def _cstring(self, prop):
return str(prop.text)
def _color(self, prop):
args = int_list(prop)
# Handle the optional alpha component.
alpha = int(prop.get("alpha", "255"))
if alpha != 255:
args.append(alpha)
return QtGui.QColor(*args)
def _point(self, prop):
return QtCore.QPoint(*int_list(prop))
def _pointf(self, prop):
return QtCore.QPointF(*float_list(prop))
def _rect(self, prop):
return QtCore.QRect(*int_list(prop))
def _rectf(self, prop):
return QtCore.QRectF(*float_list(prop))
def _size(self, prop):
return QtCore.QSize(*int_list(prop))
def _sizef(self, prop):
return QtCore.QSizeF(*float_list(prop))
def _pixmap(self, prop):
if prop.text:
return QtGui.QPixmap(prop.text.replace("\\", "\\\\"))
# Don't bother to set the property if the pixmap is empty.
return None
def _iconset(self, prop):
return self.icon_cache.get_icon(prop)
def _url(self, prop):
return QtCore.QUrl(prop[0].text)
def _locale(self, prop):
lang = getattr(QtCore.QLocale, prop.attrib['language'])
country = getattr(QtCore.QLocale, prop.attrib['country'])
return QtCore.QLocale(lang, country)
def _cursor(self, prop):
return QtGui.QCursor(QtCore.Qt.CursorShape(int(prop.text)))
def _date(self, prop):
return QtCore.QDate(*int_list(prop))
def _datetime(self, prop):
args = int_list(prop)
return QtCore.QDateTime(QtCore.QDate(*args[-3:]), QtCore.QTime(*args[:-3]))
def _time(self, prop):
return QtCore.QTime(*int_list(prop))
def _gradient(self, prop):
name = 'gradient'
# Create the specific gradient.
gtype = prop.get('type', '')
if gtype == 'LinearGradient':
startx = float(prop.get('startx'))
starty = float(prop.get('starty'))
endx = float(prop.get('endx'))
endy = float(prop.get('endy'))
gradient = self.factory.createQObject('QLinearGradient', name,
(startx, starty, endx, endy), is_attribute=False)
elif gtype == 'ConicalGradient':
centralx = float(prop.get('centralx'))
centraly = float(prop.get('centraly'))
angle = float(prop.get('angle'))
gradient = self.factory.createQObject('QConicalGradient', name,
(centralx, centraly, angle), is_attribute=False)
elif gtype == 'RadialGradient':
centralx = float(prop.get('centralx'))
centraly = float(prop.get('centraly'))
radius = float(prop.get('radius'))
focalx = float(prop.get('focalx'))
focaly = float(prop.get('focaly'))
gradient = self.factory.createQObject('QRadialGradient', name,
(centralx, centraly, radius, focalx, focaly),
is_attribute=False)
else:
raise UnsupportedPropertyError(prop.tag)
# Set the common values.
spread = prop.get('spread')
if spread:
gradient.setSpread(getattr(QtGui.QGradient, spread))
cmode = prop.get('coordinatemode')
if cmode:
gradient.setCoordinateMode(getattr(QtGui.QGradient, cmode))
# Get the gradient stops.
for gstop in prop:
if gstop.tag != 'gradientstop':
raise UnsupportedPropertyError(gstop.tag)
position = float(gstop.get('position'))
color = self._color(gstop[0])
gradient.setColorAt(position, color)
return name
def _palette(self, prop):
palette = self.factory.createQObject("QPalette", "palette", (),
is_attribute=False)
for palette_elem in prop:
sub_palette = getattr(QtGui.QPalette, palette_elem.tag.title())
for role, color in enumerate(palette_elem):
if color.tag == 'color':
# Handle simple colour descriptions where the role is
# implied by the colour's position.
palette.setColor(sub_palette,
QtGui.QPalette.ColorRole(role), self._color(color))
elif color.tag == 'colorrole':
role = getattr(QtGui.QPalette, color.get('role'))
brushstyle = color[0].get('brushstyle')
if brushstyle in ('LinearGradientPattern', 'ConicalGradientPattern', 'RadialGradientPattern'):
gradient = self._gradient(color[0][0])
brush = self.factory.createQObject("QBrush", "brush",
(gradient, ), is_attribute=False)
else:
color = self._color(color[0][0])
brush = self.factory.createQObject("QBrush", "brush",
(color, ), is_attribute=False)
brushstyle = getattr(QtCore.Qt, brushstyle)
brush.setStyle(brushstyle)
palette.setBrush(sub_palette, role, brush)
else:
raise UnsupportedPropertyError(color.tag)
return palette
#@needsWidget
def _sizepolicy(self, prop, widget):
values = [int(child.text) for child in prop]
if len(values) == 2:
# Qt v4.3.0 and later.
horstretch, verstretch = values
hsizetype = getattr(QtGui.QSizePolicy, prop.get('hsizetype'))
vsizetype = getattr(QtGui.QSizePolicy, prop.get('vsizetype'))
else:
hsizetype, vsizetype, horstretch, verstretch = values
hsizetype = QtGui.QSizePolicy.Policy(hsizetype)
vsizetype = QtGui.QSizePolicy.Policy(vsizetype)
sizePolicy = self.factory.createQObject("QSizePolicy", "sizePolicy",
(hsizetype, vsizetype), is_attribute=False)
sizePolicy.setHorizontalStretch(horstretch)
sizePolicy.setVerticalStretch(verstretch)
sizePolicy.setHeightForWidth(widget.sizePolicy.hasHeightForWidth())
return sizePolicy
_sizepolicy = needsWidget(_sizepolicy)
# font needs special handling/conversion of all child elements.
_font_attributes = (("Family", str),
("PointSize", int),
("Weight", int),
("Italic", bool_),
("Underline", bool_),
("StrikeOut", bool_),
("Bold", bool_))
def _font(self, prop):
newfont = self.factory.createQObject("QFont", "font", (),
is_attribute = False)
for attr, converter in self._font_attributes:
v = prop.findtext("./%s" % (attr.lower(),))
if v is None:
continue
getattr(newfont, "set%s" % (attr,))(converter(v))
return newfont
def _cursorShape(self, prop):
return getattr(QtCore.Qt, prop.text)
def convert(self, prop, widget=None):
try:
func = getattr(self, "_" + prop[0].tag)
except AttributeError:
raise UnsupportedPropertyError(prop[0].tag)
else:
args = {}
if getattr(func, "needsWidget", False):
assert widget is not None
args["widget"] = widget
return func(prop[0], **args)
def _getChild(self, elem_tag, elem, name, default=None):
for prop in elem.findall(elem_tag):
if prop.attrib["name"] == name:
return self.convert(prop)
else:
return default
def getProperty(self, elem, name, default=None):
return self._getChild("property", elem, name, default)
def getAttribute(self, elem, name, default=None):
return self._getChild("attribute", elem, name, default)
def setProperties(self, widget, elem):
try:
self.wclass = elem.attrib["class"]
except KeyError:
pass
for prop in elem.findall("property"):
prop_name = prop.attrib["name"]
DEBUG("setting property %s" % (prop_name,))
try:
stdset = bool(int(prop.attrib["stdset"]))
except KeyError:
stdset = True
if not stdset:
self._setViaSetProperty(widget, prop)
elif hasattr(self, prop_name):
getattr(self, prop_name)(widget, prop)
else:
prop_value = self.convert(prop, widget)
if prop_value is not None:
getattr(widget, "set%s%s" % (ascii_upper(prop_name[0]), prop_name[1:]))(prop_value)
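    # Illustrative .ui fragment handled by setProperties() above (assumed
    # example):
    #   <property name="windowTitle">
    #    <string>MainWindow</string>
    #   </property>
    # is converted via _string() and applied as widget.setWindowTitle(...).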
# SPECIAL PROPERTIES
# If a property has a well-known value type but needs special,
# context-dependent handling, the default behaviour can be overridden here.
# Delayed properties will be set after the whole widget tree has been
# populated.
def _delay(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
prop_name = prop.attrib["name"]
self.delayed_props.append((
getattr(widget, "set%s%s" % (ascii_upper(prop_name[0]), prop_name[1:])),
prop_value))
# These properties will be set with a widget.setProperty call rather than
# calling the set<property> function.
def _setViaSetProperty(self, widget, prop):
prop_value = self.convert(prop)
if prop_value is not None:
widget.setProperty(prop.attrib["name"], prop_value)
# Ignore the property.
def _ignore(self, widget, prop):
pass
# Define properties that use the canned handlers.
currentIndex = _delay
currentRow = _delay
showDropIndicator = _setViaSetProperty
intValue = _setViaSetProperty
value = _setViaSetProperty
objectName = _ignore
leftMargin = _ignore
topMargin = _ignore
rightMargin = _ignore
bottomMargin = _ignore
horizontalSpacing = _ignore
verticalSpacing = _ignore
# buddy setting has to be done after the whole widget tree has been
# populated. We can't use delay here because we cannot get the actual
# buddy yet.
def buddy(self, widget, prop):
buddy_name = prop[0].text
if buddy_name:
self.buddies.append((widget, buddy_name))
# geometry is handled specially if set on the toplevel widget.
def geometry(self, widget, prop):
if widget.objectName == self.uiname:
geom = int_list(prop[0])
widget.resize(geom[2], geom[3])
else:
widget.setGeometry(self._rect(prop[0]))
def orientation(self, widget, prop):
# If the class is a QFrame, it's a line.
if widget.className() == "QFrame":
widget.setFrameShape(
{"Qt::Horizontal": QtGui.QFrame.HLine,
"Qt::Vertical" : QtGui.QFrame.VLine}[prop[0].text])
# In Qt Designer, lines appear to be sunken, QFormBuilder loads
# them as such, uic generates plain lines. We stick to the look in
# Qt Designer.
widget.setFrameShadow(QtGui.QFrame.Sunken)
else:
widget.setOrientation(self._enum(prop[0]))
# The isWrapping attribute of QListView is named inconsistently, it should
# be wrapping.
def isWrapping(self, widget, prop):
widget.setWrapping(self.convert(prop))
# This is a pseudo-property injected to deal with setContentsMargin()
# introduced in Qt v4.3.
def pyuicContentsMargins(self, widget, prop):
widget.setContentsMargins(*int_list(prop))
# This is a pseudo-property injected to deal with setHorizontalSpacing()
# and setVerticalSpacing() introduced in Qt v4.3.
def pyuicSpacing(self, widget, prop):
horiz, vert = int_list(prop)
if horiz == vert:
widget.setSpacing(horiz)
else:
if horiz >= 0:
widget.setHorizontalSpacing(horiz)
if vert >= 0:
widget.setVerticalSpacing(vert)
| gpl-3.0 | 281,255,060,788,967,330 | 32.454751 | 114 | 0.57368 | false |
villaverde/iredadmin | libs/iredutils.py | 1 | 17090 | # encoding: utf-8
# Author: Zhang Huangbin <[email protected]>
from os import urandom, getloadavg
import re
import time
import urllib2
import socket
from base64 import b64encode, b64decode
from xml.dom.minidom import parseString as parseXMLString
import random
import subprocess
import web
import settings
from libs import md5crypt
######################
# Regular expressions.
#
# Email.
reEmail = r'''[\w\-][\w\-\.\+\=]*@[\w\-][\w\-\.]*\.[a-zA-Z0-9\-]{2,15}'''
# Domain.
reDomain = r'''[\w\-][\w\-\.]*\.[a-z0-9\-]{2,15}'''
# End Regular expressions.
####
#####################################
# Pre-defined values of SQL functions.
sqlUnixTimestamp = web.sqlliteral('UNIX_TIMESTAMP()')
#####
##############
# Validators
#
INVALID_EMAIL_CHARS = '~!#$%^&*()\\/\ '
INVALID_DOMAIN_CHARS = '~!#$%^&*()+\\/\ '
def is_email(s):
s = str(s)
if len(set(s) & set(INVALID_EMAIL_CHARS)) > 0 \
or '.' not in s \
or s.count('@') != 1:
return False
reCompEmail = re.compile(reEmail + '$', re.IGNORECASE)
if reCompEmail.match(s):
return True
else:
return False
def is_domain(s):
s = str(s)
if len(set(s) & set(INVALID_DOMAIN_CHARS)) > 0 or '.' not in s:
return False
reCompDomain = re.compile(reDomain + '$', re.IGNORECASE)
if reCompDomain.match(s):
return True
else:
return False
def isStrictIP(s):
s = str(s)
fields = s.split('.')
if len(fields) != 4:
return False
    # Must be an integer number (0 < number < 255)
for fld in fields:
if fld.isdigit():
if not 0 < int(fld) < 255:
return False
else:
return False
return True
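# Illustrative results of the validators above (assumed examples):
#
#   >>> is_email('[email protected]')
#   True
#   >>> is_email('no-at-sign.example.com')
#   False
#   >>> is_domain('example.com')
#   True
#   >>> isStrictIP('192.168.1.1')
#   True
#   >>> isStrictIP('10.1.2.256')
#   False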
#
# End Validators
##################
#########################
# Custom Jinja2 filters.
#
def filesizeformat(value, baseMB=False):
"""Format the value like a 'human-readable' file size (i.e. 13 KB,
    4 MB, 102 Bytes).  Binary (base-1024) prefixes are always used; if
    the second parameter (baseMB) is set to `True`, the given value is
    treated as megabytes instead of bytes.
"""
try:
bytes = float(value)
except:
return 0
if baseMB is True:
bytes = bytes * 1024 * 1024
base = 1024
if bytes == 0:
return '0'
ret = '0'
if bytes < base:
ret = '%d Bytes' % (bytes)
elif bytes < base * base:
ret = '%d KB' % (bytes / base)
elif bytes < base * base * base:
ret = '%d MB' % (bytes / (base * base))
elif bytes < base * base * base * base:
if bytes % (base * base * base) == 0:
ret = '%d GB' % (bytes / (base * base * base))
else:
ret = "%d MB" % (bytes / (base * base))
else:
ret = '%.1f TB' % (bytes / (base * base * base * base))
return ret
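# Illustrative results of filesizeformat() (assumed examples):
#
#   >>> filesizeformat(500)
#   '500 Bytes'
#   >>> filesizeformat(4200000)
#   '4 MB'
#   >>> filesizeformat(10, baseMB=True)    # value given in MB
#   '10 MB'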
def set_datetime_format(t, hour=True,):
"""Format LDAP timestamp and Amavisd msgs.time_iso to YYYY-MM-DD HH:MM:SS.
>>> set_datetime_format('20100925T113256Z')
'2010-09-25 11:32:56'
>>> set_datetime_format('20100925T113256Z', hour=False)
'2010-09-25'
>>> set_datetime_format('INVALID_TIME_STAMP') # Return original string
'INVALID_TIME_STAMP'
"""
if t is None:
return '--'
else:
t = str(t)
if not hour:
time_format = '%Y-%m-%d'
else:
time_format = '%Y-%m-%d %H:%M:%S'
# LDAP timestamp
if 'T' not in t and t.endswith('Z'):
try:
return time.strftime(time_format, time.strptime(t, '%Y%m%d%H%M%SZ'))
except:
pass
# MySQL TIMESTAMP(): yyyymmddTHHMMSSZ
if 'T' in t and t.endswith('Z'):
try:
return time.strftime(time_format, time.strptime(t, '%Y%m%dT%H%M%SZ'))
except:
pass
# MySQL NOW(): yyyy-mm-dd HH:MM:SS
if '-' in t and ' ' in t and ':' in t:
# DBMail default last login date.
if t == '1979-11-03 22:05:58':
return '--'
try:
return time.strftime(time_format, time.strptime(t, '%Y-%m-%d %H:%M:%S'))
except:
pass
# ISO8601 UTC ascii time. Used in table: amavisd.msgs.
if len(t) == 14:
try:
return time.strftime(time_format, time.strptime(t, '%Y%m%d%H%M%S'))
except:
pass
return t
def cut_string(s, length=40):
try:
if len(s) != len(s.encode('utf-8', 'replace')):
length = length / 2
if len(s) >= length:
return s[:length] + '...'
else:
return s
except UnicodeDecodeError:
return unicode(s, 'utf-8', 'replace')
except:
return s
#
# End Jinja2 filters.
########################
def get_server_uptime():
try:
# Works on Linux.
f = open("/proc/uptime")
contents = f.read().split()
f.close()
except:
return None
total_seconds = float(contents[0])
MINUTE = 60
HOUR = MINUTE * 60
DAY = HOUR * 24
# Get the days, hours, minutes.
days = int(total_seconds / DAY)
hours = int((total_seconds % DAY) / HOUR)
minutes = int((total_seconds % HOUR) / MINUTE)
return (days, hours, minutes)
def get_system_load_average():
try:
(a1, a2, a3) = getloadavg()
a1 = '%.3f' % a1
a2 = '%.3f' % a2
a3 = '%.3f' % a3
return (a1, a2, a3)
except:
return (0, 0, 0)
def get_gmttime():
# Convert local time to UTC
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
def convertSQLQueryRecords(qr=[]):
"""Convert SQL record value to avoid incorrect unicode handle in Jinja2.
>>> db = web.DB(None, {})
>>> qr = db.query('SELECT * FROM msgs')
>>> convertSQLQueryRecords(qr)
>>> qr = db.select('msgs')
>>> convertSQLQueryRecords(qr)
"""
rcds = []
for record in qr:
for k in record:
try:
record[k] = web.safeunicode(record.get(k))
except UnicodeDecodeError:
record[k] = '<<< DECODE FAILED >>>'
rcds += [record]
return rcds
def verify_new_password(newpw, confirmpw,
min_passwd_length=settings.min_passwd_length,
max_passwd_length=settings.max_passwd_length):
# Get new passwords from user input.
newpw = str(newpw).strip()
confirmpw = str(confirmpw).strip()
# Empty password is not allowed.
if newpw == confirmpw:
passwd = newpw
else:
return (False, 'PW_MISMATCH')
if not len(passwd) > 0:
return (False, 'PW_EMPTY')
if not len(passwd) >= int(min_passwd_length):
return (False, 'PW_LESS_THAN_MIN_LENGTH')
if int(max_passwd_length) != 0:
if not len(passwd) <= int(max_passwd_length):
return (False, 'PW_GREATER_THAN_MAX_LENGTH')
return (True, passwd)
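# Illustrative usage of verify_new_password() (assumed example; the success
# case additionally depends on settings.min_passwd_length/max_passwd_length):
#
#   >>> verify_new_password('Secret123', 'Secret456')
#   (False, 'PW_MISMATCH')
#   >>> verify_new_password('Secret123', 'Secret123')
#   (True, 'Secret123')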
def generate_random_strings(length=10):
"""Create a random password of specified length"""
try:
length = int(length) or 10
except:
length = 10
# Characters used to generate the random password
chars = '23456789' + 'abcdefghjkmnpqrstuvwxyz' + '23456789' + \
'ABCDEFGHJKLMNPQRSTUVWXYZ' + '23456789' # + '@#&*-+'
return "".join(random.choice(chars) for x in range(length))
def generate_bcrypt_password(p):
try:
import bcrypt
except:
return generate_ssha_password(p)
return '{CRYPT}' + bcrypt.hashpw(p, bcrypt.gensalt())
def verify_bcrypt_password(challenge_password, plain_password):
try:
import bcrypt
except:
return False
if challenge_password.startswith('{CRYPT}$2a$') \
or challenge_password.startswith('{CRYPT}$2b$') \
or challenge_password.startswith('{crypt}$2a$') \
or challenge_password.startswith('{crypt}$2b$'):
challenge_password = challenge_password[7:]
return bcrypt.checkpw(plain_password, challenge_password)
def generate_md5_password(p):
p = str(p).strip()
return md5crypt.unix_md5_crypt(p, generate_random_strings(length=8))
def verify_md5_password(challenge_password, plain_password):
"""Verify salted MD5 password"""
if challenge_password.startswith('{MD5}') or challenge_password.startswith('{md5}'):
challenge_password = challenge_password[5:]
if not (
challenge_password.startswith('$') \
and len(challenge_password) == 34 \
and challenge_password.count('$') == 3):
return False
# Get salt from hashed string
salt = challenge_password.split('$')
salt[-1] = ''
salt = '$'.join(salt)
if md5crypt.md5crypt(plain_password, salt) == challenge_password:
return True
else:
return False
def generate_plain_md5_password(p):
p = str(p).strip()
try:
from hashlib import md5
return md5(p).hexdigest()
except ImportError:
import md5
return md5.new(p).hexdigest()
return p
def verify_plain_md5_password(challenge_password, plain_password):
if challenge_password.startswith('{PLAIN-MD5}') \
or challenge_password.startswith('{plain-md5}'):
challenge_password = challenge_password[11:]
if challenge_password == generate_plain_md5_password(plain_password):
return True
else:
return False
def generate_ssha_password(p):
p = str(p).strip()
salt = urandom(8)
try:
from hashlib import sha1
pw = sha1(p)
except ImportError:
import sha
pw = sha.new(p)
pw.update(salt)
return "{SSHA}" + b64encode(pw.digest() + salt)
def verify_ssha_password(challenge_password, plain_password):
"""Verify SSHA (salted SHA) hash with or without prefix '{SSHA}'"""
if challenge_password.startswith('{SSHA}') \
or challenge_password.startswith('{ssha}'):
challenge_password = challenge_password[6:]
if not len(challenge_password) > 20:
# Not a valid SSHA hash
return False
try:
challenge_bytes = b64decode(challenge_password)
digest = challenge_bytes[:20]
salt = challenge_bytes[20:]
try:
from hashlib import sha1
hr = sha1(plain_password)
except ImportError:
import sha
hr = sha.new(plain_password)
hr.update(salt)
return digest == hr.digest()
except:
return False
def generate_ssha512_password(p):
"""Generate salted SHA512 password with prefix '{SSHA512}'.
Return salted SHA hash if python is older than 2.5 (module hashlib)."""
p = str(p).strip()
try:
from hashlib import sha512
salt = urandom(8)
pw = sha512(p)
pw.update(salt)
return "{SSHA512}" + b64encode(pw.digest() + salt)
except ImportError:
# Use SSHA password instead if python is older than 2.5.
return generate_ssha_password(p)
def verify_ssha512_password(challenge_password, plain_password):
"""Verify SSHA512 password with or without prefix '{SSHA512}'.
Python-2.5 is required since it requires module hashlib."""
if challenge_password.startswith('{SSHA512}') \
or challenge_password.startswith('{ssha512}'):
challenge_password = challenge_password[9:]
# With SSHA512, hash itself is 64 bytes (512 bits/8 bits per byte),
# everything after that 64 bytes is the salt.
if not len(challenge_password) > 64:
return False
try:
challenge_bytes = b64decode(challenge_password)
digest = challenge_bytes[:64]
salt = challenge_bytes[64:]
from hashlib import sha512
hr = sha512(plain_password)
hr.update(salt)
return digest == hr.digest()
except:
return False
def generate_cram_md5_password(p):
"""Generate CRAM-MD5 hash with `doveadm pw` command with prefix '{CRAM-MD5}'.
Return SSHA instead if no 'doveadm' command found or other error raised."""
p = str(p).strip()
try:
pp = subprocess.Popen(['doveadm', 'pw', '-s', 'CRAM-MD5', '-p', p],
stdout=subprocess.PIPE)
return pp.communicate()[0]
except:
return generate_ssha_password(p)
def verify_cram_md5_password(challenge_password, plain_password):
"""Verify CRAM-MD5 hash with 'doveadm pw' command."""
    if not (challenge_password.startswith('{CRAM-MD5}')
            or challenge_password.startswith('{cram-md5}')):
return False
try:
exit_status = subprocess.call(['doveadm',
'pw',
'-t',
challenge_password,
'-p',
plain_password])
if exit_status == 0:
return True
except:
pass
return False
def generate_password_hash(p, pwscheme=None):
"""Generate password for LDAP mail user and admin."""
pw = str(p).strip()
if not pwscheme:
pwscheme = settings.DEFAULT_PASSWORD_SCHEME
if pwscheme == 'BCRYPT':
pw = generate_bcrypt_password(p)
elif pwscheme == 'SSHA512':
pw = generate_ssha512_password(p)
elif pwscheme == 'SSHA':
pw = generate_ssha_password(p)
elif pwscheme == 'MD5':
pw = '{CRYPT}' + generate_md5_password(p)
elif pwscheme == 'PLAIN-MD5':
pw = generate_plain_md5_password(p)
elif pwscheme == 'PLAIN':
if settings.SQL_PASSWORD_PREFIX_SCHEME is True:
pw = '{PLAIN}' + p
else:
pw = p
else:
# Plain password
pw = p
return pw
def verify_password_hash(challenge_password, plain_password):
# Check plain password and MD5 first.
if challenge_password in [plain_password,
'{PLAIN}' + plain_password,
'{plain}' + plain_password]:
return True
elif verify_md5_password(challenge_password, plain_password):
return True
upwd = challenge_password.upper()
if upwd.startswith('{SSHA}'):
return verify_ssha_password(challenge_password, plain_password)
elif upwd.startswith('{SSHA512}'):
return verify_ssha512_password(challenge_password, plain_password)
elif upwd.startswith('{PLAIN-MD5}'):
return verify_plain_md5_password(challenge_password, plain_password)
elif upwd.startswith('{CRAM-MD5}'):
return verify_cram_md5_password(challenge_password, plain_password)
elif upwd.startswith('{CRYPT}$2A$') or upwd.startswith('{CRYPT}$2B$'):
return verify_bcrypt_password(challenge_password, plain_password)
return False
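# Illustrative round trip with the two helpers above (assumed example):
#
#   hashed = generate_password_hash('secret', pwscheme='SSHA')
#   verify_password_hash(hashed, 'secret')   # True
#   verify_password_hash(hashed, 'wrong')    # False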
def generate_maildir_path(mail,
hashedMaildir=settings.MAILDIR_HASHED,
prependDomainName=settings.MAILDIR_PREPEND_DOMAIN,
appendTimestamp=settings.MAILDIR_APPEND_TIMESTAMP,
):
"""Generate path of mailbox."""
mail = web.safestr(mail)
if not is_email(mail):
return (False, 'INVALID_EMAIL_ADDRESS')
# Get user/domain part from mail address.
username, domain = mail.split('@', 1)
# Get current timestamp.
timestamp = ''
if appendTimestamp:
timestamp = time.strftime('-%Y.%m.%d.%H.%M.%S')
if hashedMaildir is True:
if len(username) >= 3:
maildir = "%s/%s/%s/%s%s/" % (
username[0], username[1], username[2], username, timestamp,
)
elif len(username) == 2:
maildir = "%s/%s/%s/%s%s/" % (
username[0], username[1], username[1], username, timestamp,
)
else:
maildir = "%s/%s/%s/%s%s/" % (
username[0], username[0], username[0], username, timestamp,
)
mailMessageStore = maildir
else:
mailMessageStore = "%s%s/" % (username, timestamp,)
if prependDomainName:
mailMessageStore = domain + '/' + mailMessageStore
return mailMessageStore.lower()
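# Illustrative result of generate_maildir_path() (assumed example; the exact
# layout depends on MAILDIR_HASHED, MAILDIR_PREPEND_DOMAIN and
# MAILDIR_APPEND_TIMESTAMP in settings). With hashed maildirs, domain
# prepending enabled and timestamps disabled:
#
#   >>> generate_maildir_path('[email protected]')
#   'example.com/p/o/s/postmaster/'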
def getNewVersion(urlOfXML):
'''Checking new version via parsing XML string to extract version number.
>>> getNewVersion('http://xxx/sample.xml') # New version available.
(True, {'version': '1.3.0',
'date': '2010-10-01',
'url': 'http://xxx/release-notes-1.3.0.html'
})
>>> getNewVersion('http://xxx/sample.xml') # Error while checking.
(False, 'HTTP Error 404: Not Found')
'''
try:
socket.setdefaulttimeout(5)
dom = parseXMLString(urllib2.urlopen(urlOfXML).read())
version = dom.documentElement.getElementsByTagName('version')[0].childNodes[0].data
date = dom.documentElement.getElementsByTagName('date')[0].childNodes[0].data
urlOfReleaseNotes = dom.documentElement.getElementsByTagName('releasenotes')[0].childNodes[0].data
d = {'version': str(version),
'date': str(date),
'url': str(urlOfReleaseNotes),
}
return (True, d)
except Exception, e:
return (False, str(e))
| gpl-2.0 | 3,642,822,410,555,801,600 | 26.788618 | 106 | 0.572089 | false |
andrewhao/dotfiles | weechat.symlink/python/autoload/wee_slack.py | 1 | 141239 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import wraps
import time
import json
import pickle
import sha
import os
import re
import urllib
import sys
import traceback
import collections
import ssl
import random
import string
from websocket import create_connection, WebSocketConnectionClosedException
# hack to make tests possible.. better way?
try:
import weechat
except:
pass
SCRIPT_NAME = "slack"
SCRIPT_AUTHOR = "Ryan Huber <[email protected]>"
SCRIPT_VERSION = "1.99"
SCRIPT_LICENSE = "MIT"
SCRIPT_DESC = "Extends weechat for typing notification/search/etc on slack.com"
BACKLOG_SIZE = 200
SCROLLBACK_SIZE = 500
RECORD_DIR = "/tmp/weeslack-debug"
SLACK_API_TRANSLATOR = {
"channel": {
"history": "channels.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "channels.mark",
"info": "channels.info",
},
"im": {
"history": "im.history",
"join": "conversations.open",
"leave": "conversations.close",
"mark": "im.mark",
},
"mpim": {
"history": "mpim.history",
"join": "mpim.open", # conversations.open lacks unread_count_display
"leave": "conversations.close",
"mark": "mpim.mark",
"info": "groups.info",
},
"group": {
"history": "groups.history",
"join": "conversations.join",
"leave": "conversations.leave",
"mark": "groups.mark",
"info": "groups.info"
},
"thread": {
"history": None,
"join": None,
"leave": None,
"mark": None,
}
}
###### Decorators have to be up here
def slack_buffer_or_ignore(f):
"""
Only run this function if we're in a slack buffer, else ignore
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_OK
return f(data, current_buffer, *args, **kwargs)
return wrapper
def slack_buffer_required(f):
"""
Only run this function if we're in a slack buffer, else print error
"""
@wraps(f)
def wrapper(data, current_buffer, *args, **kwargs):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return w.WEECHAT_RC_ERROR
return f(data, current_buffer, *args, **kwargs)
return wrapper
def utf8_decode(f):
"""
Decode all arguments from byte strings to unicode strings. Use this for
functions called from outside of this script, e.g. callbacks from weechat.
"""
@wraps(f)
def wrapper(*args, **kwargs):
return f(*decode_from_utf8(args), **decode_from_utf8(kwargs))
return wrapper
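# Illustrative use of the decorators above on a command callback (hypothetical
# function name, not part of the original script):
#
#   @slack_buffer_or_ignore
#   @utf8_decode
#   def command_example(data, current_buffer, args):
#       ...  # only runs when current_buffer belongs to a wee-slack buffer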
NICK_GROUP_HERE = "0|Here"
NICK_GROUP_AWAY = "1|Away"
sslopt_ca_certs = {}
if hasattr(ssl, "get_default_verify_paths") and callable(ssl.get_default_verify_paths):
ssl_defaults = ssl.get_default_verify_paths()
if ssl_defaults.cafile is not None:
sslopt_ca_certs = {'ca_certs': ssl_defaults.cafile}
###### Unicode handling
def encode_to_utf8(data):
if isinstance(data, unicode):
return data.encode('utf-8')
if isinstance(data, bytes):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(encode_to_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(encode_to_utf8, data))
else:
return data
def decode_from_utf8(data):
if isinstance(data, bytes):
return data.decode('utf-8')
if isinstance(data, unicode):
return data
elif isinstance(data, collections.Mapping):
return type(data)(map(decode_from_utf8, data.iteritems()))
elif isinstance(data, collections.Iterable):
return type(data)(map(decode_from_utf8, data))
else:
return data
class WeechatWrapper(object):
def __init__(self, wrapped_class):
self.wrapped_class = wrapped_class
# Helper method used to encode/decode method calls.
def wrap_for_utf8(self, method):
def hooked(*args, **kwargs):
result = method(*encode_to_utf8(args), **encode_to_utf8(kwargs))
# Prevent wrapped_class from becoming unwrapped
if result == self.wrapped_class:
return self
return decode_from_utf8(result)
return hooked
# Encode and decode everything sent to/received from weechat. We use the
# unicode type internally in wee-slack, but has to send utf8 to weechat.
def __getattr__(self, attr):
orig_attr = self.wrapped_class.__getattribute__(attr)
if callable(orig_attr):
return self.wrap_for_utf8(orig_attr)
else:
return decode_from_utf8(orig_attr)
# Ensure all lines sent to weechat specifies a prefix. For lines after the
# first, we want to disable the prefix, which is done by specifying a space.
def prnt_date_tags(self, buffer, date, tags, message):
message = message.replace("\n", "\n \t")
return self.wrap_for_utf8(self.wrapped_class.prnt_date_tags)(buffer, date, tags, message)
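# A minimal sketch of how the wrapper above is used (the actual instance is
# presumably created near the end of this script as w = WeechatWrapper(weechat)):
#
#   w = WeechatWrapper(weechat)
#   w.prnt("", "héllo")                         # args encoded to UTF-8 for weechat
#   version = w.info_get("version_number", "")  # return value decoded to unicode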
##### Helpers
def get_nick_color_name(nick):
info_name_prefix = "irc_" if int(weechat_version) < 0x1050000 else ""
return w.info_get(info_name_prefix + "nick_color_name", nick)
##### BEGIN NEW
IGNORED_EVENTS = [
"hello",
# "pref_change",
# "reconnect_url",
]
###### New central Event router
class EventRouter(object):
def __init__(self):
"""
complete
Eventrouter is the central hub we use to route:
1) incoming websocket data
2) outgoing http requests and incoming replies
3) local requests
It has a recorder that, when enabled, logs most events
to the location specified in RECORD_DIR.
"""
self.queue = []
self.slow_queue = []
self.slow_queue_timer = 0
self.teams = {}
self.context = {}
self.weechat_controller = WeechatController(self)
self.previous_buffer = ""
self.reply_buffer = {}
self.cmds = {k[8:]: v for k, v in globals().items() if k.startswith("command_")}
self.proc = {k[8:]: v for k, v in globals().items() if k.startswith("process_")}
self.handlers = {k[7:]: v for k, v in globals().items() if k.startswith("handle_")}
self.local_proc = {k[14:]: v for k, v in globals().items() if k.startswith("local_process_")}
self.shutting_down = False
self.recording = False
self.recording_path = "/tmp"
def record(self):
"""
complete
Toggles the event recorder and creates a directory for data if enabled.
"""
self.recording = not self.recording
if self.recording:
if not os.path.exists(RECORD_DIR):
os.makedirs(RECORD_DIR)
def record_event(self, message_json, file_name_field, subdir=None):
"""
complete
Called each time you want to record an event.
message_json is a json in dict form
file_name_field is the json key whose value you want to be part of the file name
"""
now = time.time()
if subdir:
directory = "{}/{}".format(RECORD_DIR, subdir)
else:
directory = RECORD_DIR
if not os.path.exists(directory):
os.makedirs(directory)
mtype = message_json.get(file_name_field, 'unknown')
f = open('{}/{}-{}.json'.format(directory, now, mtype), 'w')
f.write("{}".format(json.dumps(message_json)))
f.close()
def store_context(self, data):
"""
A place to store data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
identifier = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(40))
self.context[identifier] = data
dbg("stored context {} {} ".format(identifier, data.url))
return identifier
def retrieve_context(self, identifier):
"""
A place to retrieve data and vars needed by callback returns. We need this because
weechat's "callback_data" has a limited size and weechat will crash if you exceed
this size.
"""
data = self.context.get(identifier, None)
if data:
# dbg("retrieved context {} ".format(identifier))
return data
def delete_context(self, identifier):
"""
        A single request can span multiple HTTP callbacks, so we may need to delete its context as a last step
"""
if identifier in self.context:
            # dbg("deleted context {} ".format(identifier))
del self.context[identifier]
def shutdown(self):
"""
complete
This toggles shutdown mode. Shutdown mode tells us not to
talk to Slack anymore. Without this, typing /quit will trigger
a race with the buffer close callback and may result in you
leaving every slack channel.
"""
self.shutting_down = not self.shutting_down
def register_team(self, team):
"""
complete
Adds a team to the list of known teams for this EventRouter.
"""
if isinstance(team, SlackTeam):
self.teams[team.get_team_hash()] = team
else:
raise InvalidType(type(team))
def reconnect_if_disconnected(self):
for team_id, team in self.teams.iteritems():
if not team.connected:
team.connect()
dbg("reconnecting {}".format(team))
def receive_ws_callback(self, team_hash):
"""
incomplete (reconnect)
This is called by the global method of the same name.
It is triggered when we have incoming data on a websocket,
which needs to be read. Once it is read, we will ensure
the data is valid JSON, add metadata, and place it back
on the queue for processing as JSON.
"""
try:
# Read the data from the websocket associated with this team.
data = decode_from_utf8(self.teams[team_hash].ws.recv())
message_json = json.loads(data)
metadata = WeeSlackMetadata({
"team": team_hash,
}).jsonify()
message_json["wee_slack_metadata"] = metadata
if self.recording:
self.record_event(message_json, 'type', 'websocket')
self.receive_json(json.dumps(message_json))
except WebSocketConnectionClosedException:
# TODO: handle reconnect here
self.teams[team_hash].set_disconnected()
return w.WEECHAT_RC_OK
except Exception:
dbg("socket issue: {}\n".format(traceback.format_exc()))
return w.WEECHAT_RC_OK
def receive_httprequest_callback(self, data, command, return_code, out, err):
"""
complete
Receives the result of an http request we previously handed
off to weechat (weechat bundles libcurl). Weechat can fragment
replies, so it buffers them until the reply is complete.
It is then populated with metadata here so we can identify
where the request originated and route properly.
"""
request_metadata = self.retrieve_context(data)
try:
dbg("RECEIVED CALLBACK with request of {} id of {} and code {} of length {}".format(request_metadata.request, request_metadata.response_id, return_code, len(out)))
except:
dbg(request_metadata)
return
if return_code == 0:
if len(out) > 0:
if request_metadata.response_id in self.reply_buffer:
# dbg("found response id in reply_buffer", True)
self.reply_buffer[request_metadata.response_id] += out
else:
# dbg("didn't find response id in reply_buffer", True)
self.reply_buffer[request_metadata.response_id] = ""
self.reply_buffer[request_metadata.response_id] += out
try:
j = json.loads(self.reply_buffer[request_metadata.response_id])
except:
pass
# dbg("Incomplete json, awaiting more", True)
try:
j["wee_slack_process_method"] = request_metadata.request_normalized
j["wee_slack_request_metadata"] = pickle.dumps(request_metadata)
self.reply_buffer.pop(request_metadata.response_id)
if self.recording:
self.record_event(j, 'wee_slack_process_method', 'http')
self.receive_json(json.dumps(j))
self.delete_context(data)
except:
dbg("HTTP REQUEST CALLBACK FAILED", True)
pass
# We got an empty reply and this is weird so just ditch it and retry
else:
dbg("length was zero, probably a bug..")
self.delete_context(data)
self.receive(request_metadata)
elif return_code != -1:
self.reply_buffer.pop(request_metadata.response_id, None)
self.delete_context(data)
else:
if request_metadata.response_id not in self.reply_buffer:
self.reply_buffer[request_metadata.response_id] = ""
self.reply_buffer[request_metadata.response_id] += out
def receive_json(self, data):
"""
complete
        Receives a raw JSON string and unmarshals it as a dict,
        then places it back on the queue for processing.
"""
dbg("RECEIVED JSON of len {}".format(len(data)))
message_json = json.loads(data)
self.queue.append(message_json)
def receive(self, dataobj):
"""
complete
Receives a raw object and places it on the queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.queue.append(dataobj)
def receive_slow(self, dataobj):
"""
complete
Receives a raw object and places it on the slow queue for
processing. Object must be known to handle_next or
be JSON.
"""
dbg("RECEIVED FROM QUEUE")
self.slow_queue.append(dataobj)
def handle_next(self):
"""
complete
Main handler of the EventRouter. This is called repeatedly
via callback to drain events from the queue. It also attaches
useful metadata and context to events as they are processed.
"""
if len(self.slow_queue) > 0 and ((self.slow_queue_timer + 1) < time.time()):
# for q in self.slow_queue[0]:
dbg("from slow queue", 0)
self.queue.append(self.slow_queue.pop())
# self.slow_queue = []
self.slow_queue_timer = time.time()
if len(self.queue) > 0:
j = self.queue.pop(0)
# Reply is a special case of a json reply from websocket.
kwargs = {}
if isinstance(j, SlackRequest):
if j.should_try():
if j.retry_ready():
local_process_async_slack_api_request(j, self)
else:
self.slow_queue.append(j)
else:
dbg("Max retries for Slackrequest")
else:
if "reply_to" in j:
dbg("SET FROM REPLY")
function_name = "reply"
elif "type" in j:
dbg("SET FROM type")
function_name = j["type"]
elif "wee_slack_process_method" in j:
dbg("SET FROM META")
function_name = j["wee_slack_process_method"]
else:
dbg("SET FROM NADA")
function_name = "unknown"
# Here we are passing the actual objects. No more lookups.
meta = j.get("wee_slack_metadata", None)
if meta:
try:
if isinstance(meta, basestring):
dbg("string of metadata")
team = meta.get("team", None)
if team:
kwargs["team"] = self.teams[team]
if "user" in j:
kwargs["user"] = self.teams[team].users[j["user"]]
if "channel" in j:
kwargs["channel"] = self.teams[team].channels[j["channel"]]
except:
dbg("metadata failure")
if function_name not in IGNORED_EVENTS:
dbg("running {}".format(function_name))
if function_name.startswith("local_") and function_name in self.local_proc:
self.local_proc[function_name](j, self, **kwargs)
elif function_name in self.proc:
self.proc[function_name](j, self, **kwargs)
elif function_name in self.handlers:
self.handlers[function_name](j, self, **kwargs)
else:
raise ProcessNotImplemented(function_name)
def handle_next(*args):
"""
complete
This is just a place to call the event router globally.
This is a dirty hack. There must be a better way.
"""
try:
EVENTROUTER.handle_next()
except:
if config.debug_mode:
traceback.print_exc()
else:
pass
return w.WEECHAT_RC_OK
class WeechatController(object):
"""
Encapsulates our interaction with weechat
"""
def __init__(self, eventrouter):
self.eventrouter = eventrouter
self.buffers = {}
self.previous_buffer = None
self.buffer_list_stale = False
def iter_buffers(self):
for b in self.buffers:
yield (b, self.buffers[b])
def register_buffer(self, buffer_ptr, channel):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
self.buffers[buffer_ptr] = channel
else:
raise InvalidType(type(buffer_ptr))
def unregister_buffer(self, buffer_ptr, update_remote=False, close_buffer=False):
"""
complete
Adds a weechat buffer to the list of handled buffers for this EventRouter
"""
if isinstance(buffer_ptr, basestring):
try:
self.buffers[buffer_ptr].destroy_buffer(update_remote)
if close_buffer:
w.buffer_close(buffer_ptr)
del self.buffers[buffer_ptr]
except:
dbg("Tried to close unknown buffer")
else:
raise InvalidType(type(buffer_ptr))
def get_channel_from_buffer_ptr(self, buffer_ptr):
return self.buffers.get(buffer_ptr, None)
def get_all(self, buffer_ptr):
return self.buffers
def get_previous_buffer_ptr(self):
return self.previous_buffer
def set_previous_buffer(self, data):
self.previous_buffer = data
def check_refresh_buffer_list(self):
return self.buffer_list_stale and self.last_buffer_list_update + 1 < time.time()
def set_refresh_buffer_list(self, setting):
self.buffer_list_stale = setting
###### New Local Processors
def local_process_async_slack_api_request(request, event_router):
"""
complete
Sends an API request to Slack. You'll need to give this a well formed SlackRequest object.
DEBUGGING!!! The context here cannot be very large. Weechat will crash.
"""
if not event_router.shutting_down:
weechat_request = 'url:{}'.format(request.request_string())
weechat_request += '&nonce={}'.format(''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(4)))
params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
request.tried()
context = event_router.store_context(request)
# TODO: let flashcode know about this bug - i have to 'clear' the hashtable or retry requests fail
w.hook_process_hashtable('url:', params, config.slack_timeout, "", context)
w.hook_process_hashtable(weechat_request, params, config.slack_timeout, "receive_httprequest_callback", context)
###### New Callbacks
@utf8_decode
def receive_httprequest_callback(data, command, return_code, out, err):
"""
complete
This is a dirty hack. There must be a better way.
"""
# def url_processor_cb(data, command, return_code, out, err):
EVENTROUTER.receive_httprequest_callback(data, command, return_code, out, err)
return w.WEECHAT_RC_OK
@utf8_decode
def receive_ws_callback(*args):
"""
complete
The first arg is all we want here. It contains the team
hash which is set when we _hook the descriptor.
This is a dirty hack. There must be a better way.
"""
EVENTROUTER.receive_ws_callback(args[0])
return w.WEECHAT_RC_OK
@utf8_decode
def reconnect_callback(*args):
EVENTROUTER.reconnect_if_disconnected()
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_closing_callback(signal, sig_type, data):
"""
complete
Receives a callback from weechat when a buffer is being closed.
We pass the eventrouter variable name in as a string, as
that is the only way we can do dependency injection via weechat
callback, hence the eval.
"""
eval(signal).weechat_controller.unregister_buffer(data, True, False)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_input_callback(signal, buffer_ptr, data):
"""
incomplete
Handles everything a user types in the input bar. In our case
this includes add/remove reactions, modifying messages, and
sending messages.
"""
eventrouter = eval(signal)
channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(buffer_ptr)
if not channel:
return w.WEECHAT_RC_ERROR
reaction = re.match("^(\d*)(\+|-):(.*):\s*$", data)
substitute = re.match("^(\d*)s/", data)
if reaction:
if reaction.group(2) == "+":
channel.send_add_reaction(int(reaction.group(1) or 1), reaction.group(3))
elif reaction.group(2) == "-":
channel.send_remove_reaction(int(reaction.group(1) or 1), reaction.group(3))
elif substitute:
msgno = int(substitute.group(1) or 1)
try:
old, new, flags = re.split(r'(?<!\\)/', data)[1:]
except ValueError:
pass
else:
# Replacement string in re.sub() is a string, not a regex, so get
# rid of escapes.
new = new.replace(r'\/', '/')
old = old.replace(r'\/', '/')
channel.edit_nth_previous_message(msgno, old, new, flags)
else:
if data.startswith(('//', ' ')):
data = data[1:]
channel.send_message(data)
# this is probably wrong channel.mark_read(update_remote=True, force=True)
return w.WEECHAT_RC_OK
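# Input syntax handled by buffer_input_callback above (illustrative examples):
#
#   +:thumbsup:     add a :thumbsup: reaction to the most recent message
#   3+:thumbsup:    add it to the 3rd most recent message
#   -:thumbsup:     remove the reaction again
#   s/teh/the/      edit your last message (2s/teh/the/ edits the 2nd previous)
#   '//text' or ' text'  a leading '//' or space escapes the line; the first
#                        character is stripped and the rest is sent literally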
# Workaround for supporting multiline messages. It intercepts before the input
# callback is called, as this is called with the whole message, while it is
# normally split on newline before being sent to buffer_input_callback
def input_text_for_buffer_cb(data, modifier, current_buffer, string):
if current_buffer not in EVENTROUTER.weechat_controller.buffers:
return string
message = decode_from_utf8(string)
if not message.startswith("/") and "\n" in message:
buffer_input_callback("EVENTROUTER", current_buffer, message)
return ""
return string
@utf8_decode
def buffer_switch_callback(signal, sig_type, data):
"""
incomplete
Every time we change channels in weechat, we call this to:
1) set read marker 2) determine if we have already populated
channel history data
"""
eventrouter = eval(signal)
prev_buffer_ptr = eventrouter.weechat_controller.get_previous_buffer_ptr()
# this is to see if we need to gray out things in the buffer list
prev = eventrouter.weechat_controller.get_channel_from_buffer_ptr(prev_buffer_ptr)
if prev:
prev.mark_read()
new_channel = eventrouter.weechat_controller.get_channel_from_buffer_ptr(data)
if new_channel:
if not new_channel.got_history:
new_channel.get_history()
eventrouter.weechat_controller.set_previous_buffer(data)
return w.WEECHAT_RC_OK
@utf8_decode
def buffer_list_update_callback(data, somecount):
"""
incomplete
A simple timer-based callback that will update the buffer list
if needed. We only do this max 1x per second, as otherwise it
uses a lot of cpu for minimal changes. We use buffer short names
to indicate typing via "#channel" <-> ">channel" and
user presence via " name" <-> "+name".
"""
eventrouter = eval(data)
# global buffer_list_update
for b in eventrouter.weechat_controller.iter_buffers():
b[1].refresh()
# buffer_list_update = True
# if eventrouter.weechat_controller.check_refresh_buffer_list():
# # gray_check = False
# # if len(servers) > 1:
# # gray_check = True
# eventrouter.weechat_controller.set_refresh_buffer_list(False)
return w.WEECHAT_RC_OK
def quit_notification_callback(signal, sig_type, data):
stop_talking_to_slack()
@utf8_decode
def typing_notification_cb(signal, sig_type, data):
msg = w.buffer_get_string(data, "input")
if len(msg) > 8 and msg[:1] != "/":
global typing_timer
now = time.time()
if typing_timer + 4 < now:
current_buffer = w.current_buffer()
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if channel and channel.type != "thread":
identifier = channel.identifier
request = {"type": "typing", "channel": identifier}
channel.team.send_to_websocket(request, expect_reply=False)
typing_timer = now
return w.WEECHAT_RC_OK
@utf8_decode
def typing_update_cb(data, remaining_calls):
w.bar_item_update("slack_typing_notice")
return w.WEECHAT_RC_OK
@utf8_decode
def slack_never_away_cb(data, remaining_calls):
if config.never_away:
for t in EVENTROUTER.teams.values():
slackbot = t.get_channel_map()['slackbot']
channel = t.channels[slackbot]
request = {"type": "typing", "channel": channel.identifier}
channel.team.send_to_websocket(request, expect_reply=False)
return w.WEECHAT_RC_OK
@utf8_decode
def typing_bar_item_cb(data, current_buffer, args):
"""
    Provides a bar item indicating who is typing in the current channel AND
    who is typing a DM to you globally.
"""
typers = []
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# first look for people typing in this channel
if current_channel:
        # this try is mostly because server buffers don't implement is_someone_typing
try:
if current_channel.type != 'im' and current_channel.is_someone_typing():
typers += current_channel.get_typing_list()
except:
pass
# here is where we notify you that someone is typing in DM
# regardless of which buffer you are in currently
for t in EVENTROUTER.teams.values():
for channel in t.channels.values():
if channel.type == "im":
if channel.is_someone_typing():
typers.append("D/" + channel.slack_name)
pass
typing = ", ".join(typers)
if typing != "":
typing = w.color('yellow') + "typing: " + typing
return typing
@utf8_decode
def nick_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all @-prefixed nicks to completion list
"""
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u:
w.hook_completion_list_add(completion, "@" + u.slack_name, 1, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def emoji_completion_cb(data, completion_item, current_buffer, completion):
"""
Adds all :-prefixed emoji to completion list
"""
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if current_channel is None:
return w.WEECHAT_RC_OK
for e in EMOJI['emoji']:
w.hook_completion_list_add(completion, ":" + e + ":", 0, w.WEECHAT_LIST_POS_SORT)
return w.WEECHAT_RC_OK
@utf8_decode
def complete_next_cb(data, current_buffer, command):
"""Extract current word, if it is equal to a nick, prefix it with @ and
rely on nick_completion_cb adding the @-prefixed versions to the
completion lists, then let Weechat's internal completion do its
thing
"""
current_buffer = w.current_buffer()
current_channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
# channel = channels.find(current_buffer)
if not hasattr(current_channel, 'members') or current_channel is None or current_channel.members is None:
return w.WEECHAT_RC_OK
line_input = w.buffer_get_string(current_buffer, "input")
current_pos = w.buffer_get_integer(current_buffer, "input_pos") - 1
input_length = w.buffer_get_integer(current_buffer, "input_length")
word_start = 0
word_end = input_length
# If we're on a non-word, look left for something to complete
while current_pos >= 0 and line_input[current_pos] != '@' and not line_input[current_pos].isalnum():
current_pos = current_pos - 1
if current_pos < 0:
current_pos = 0
for l in range(current_pos, 0, -1):
if line_input[l] != '@' and not line_input[l].isalnum():
word_start = l + 1
break
for l in range(current_pos, input_length):
if not line_input[l].isalnum():
word_end = l
break
word = line_input[word_start:word_end]
for m in current_channel.members:
u = current_channel.team.users.get(m, None)
if u and u.slack_name == word:
            # Here, we cheat.  Insert an @ in front and rely on the @-prefixed
            # nicks being in the completion list
w.buffer_set(current_buffer, "input", line_input[:word_start] + "@" + line_input[word_start:])
w.buffer_set(current_buffer, "input_pos", str(w.buffer_get_integer(current_buffer, "input_pos") + 1))
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK
def script_unloaded():
stop_talking_to_slack()
return w.WEECHAT_RC_OK
def stop_talking_to_slack():
"""
complete
Prevents a race condition where quitting closes buffers
which triggers leaving the channel because of how close
buffer is handled
"""
EVENTROUTER.shutdown()
return w.WEECHAT_RC_OK
##### New Classes
class SlackRequest(object):
"""
complete
Encapsulates a Slack api request. Valuable as an object that we can add to the queue and/or retry.
    makes a SHA of the request url and current time so we can re-tag this on the way back through.
"""
def __init__(self, token, request, post_data={}, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
self.tries = 0
self.start_time = time.time()
self.domain = 'api.slack.com'
self.request = request
self.request_normalized = re.sub(r'\W+', '', request)
self.token = token
post_data["token"] = token
self.post_data = post_data
self.params = {'useragent': 'wee_slack {}'.format(SCRIPT_VERSION)}
self.url = 'https://{}/api/{}?{}'.format(self.domain, request, urllib.urlencode(encode_to_utf8(post_data)))
self.response_id = sha.sha("{}{}".format(self.url, self.start_time)).hexdigest()
self.retries = kwargs.get('retries', 3)
# def __repr__(self):
# return "URL: {} Tries: {} ID: {}".format(self.url, self.tries, self.response_id)
def request_string(self):
return "{}".format(self.url)
def tried(self):
self.tries += 1
self.response_id = sha.sha("{}{}".format(self.url, time.time())).hexdigest()
def should_try(self):
return self.tries < self.retries
def retry_ready(self):
return (self.start_time + (self.tries**2)) < time.time()
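# A minimal sketch of how SlackRequest objects are used elsewhere in this
# script (this mirrors the rtm.start request issued in SlackTeam.connect below):
#
#   s = SlackRequest(team.token, 'rtm.start', {}, retries=999)
#   eventrouter.receive(s)   # queued, then sent by local_process_async_slack_api_request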
class SlackTeam(object):
"""
incomplete
    Team object under which users and channels live. Handles the websocket
    connection, the server buffer, and most team-level state.
"""
def __init__(self, eventrouter, token, websocket_url, subdomain, nick, myidentifier, users, bots, channels, **kwargs):
self.ws_url = websocket_url
self.connected = False
self.connecting = False
# self.ws = None
self.ws_counter = 0
self.ws_replies = {}
self.eventrouter = eventrouter
self.token = token
self.team = self
self.subdomain = subdomain
self.domain = subdomain + ".slack.com"
self.preferred_name = self.domain
self.nick = nick
self.myidentifier = myidentifier
try:
if self.channels:
for c in channels.keys():
if not self.channels.get(c):
self.channels[c] = channels[c]
except:
self.channels = channels
self.users = users
self.bots = bots
self.team_hash = SlackTeam.generate_team_hash(self.nick, self.subdomain)
self.name = self.domain
self.channel_buffer = None
self.got_history = True
self.create_buffer()
self.set_muted_channels(kwargs.get('muted_channels', ""))
for c in self.channels.keys():
channels[c].set_related_server(self)
channels[c].check_should_open()
# self.channel_set_related_server(c)
# Last step is to make sure my nickname is the set color
self.users[self.myidentifier].force_color(w.config_string(w.config_get('weechat.color.chat_nick_self')))
# This highlight step must happen after we have set related server
self.set_highlight_words(kwargs.get('highlight_words', ""))
def __eq__(self, compare_str):
if compare_str == self.token or compare_str == self.domain or compare_str == self.subdomain:
return True
else:
return False
def add_channel(self, channel):
self.channels[channel["id"]] = channel
channel.set_related_server(self)
# def connect_request_generate(self):
# return SlackRequest(self.token, 'rtm.start', {})
# def close_all_buffers(self):
# for channel in self.channels:
# self.eventrouter.weechat_controller.unregister_buffer(channel.channel_buffer, update_remote=False, close_buffer=True)
# #also close this server buffer
# self.eventrouter.weechat_controller.unregister_buffer(self.channel_buffer, update_remote=False, close_buffer=True)
def create_buffer(self):
if not self.channel_buffer:
if config.short_buffer_names:
self.preferred_name = self.subdomain
elif config.server_aliases not in ['', None]:
name = config.server_aliases.get(self.subdomain, None)
if name:
self.preferred_name = name
else:
self.preferred_name = self.domain
self.channel_buffer = w.buffer_new("{}".format(self.preferred_name), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'server')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.nick)
w.buffer_set(self.channel_buffer, "localvar_set_server", self.preferred_name)
if w.config_string(w.config_get('irc.look.server_buffer')) == 'merge_with_core':
w.buffer_merge(self.channel_buffer, w.buffer_search_main())
def set_muted_channels(self, muted_str):
self.muted_channels = {x for x in muted_str.split(',')}
def set_highlight_words(self, highlight_str):
self.highlight_words = {x for x in highlight_str.split(',')}
if len(self.highlight_words) > 0:
for v in self.channels.itervalues():
v.set_highlights()
def formatted_name(self, **kwargs):
return self.domain
def buffer_prnt(self, data):
w.prnt_date_tags(self.channel_buffer, SlackTS().major, tag("team"), data)
def get_channel_map(self):
return {v.slack_name: k for k, v in self.channels.iteritems()}
def get_username_map(self):
return {v.slack_name: k for k, v in self.users.iteritems()}
def get_team_hash(self):
return self.team_hash
@staticmethod
def generate_team_hash(nick, subdomain):
return str(sha.sha("{}{}".format(nick, subdomain)).hexdigest())
def refresh(self):
self.rename()
def rename(self):
pass
# def attach_websocket(self, ws):
# self.ws = ws
def is_user_present(self, user_id):
user = self.users.get(user_id)
if user.presence == 'active':
return True
else:
return False
def mark_read(self, ts=None, update_remote=True, force=False):
pass
def connect(self):
if not self.connected and not self.connecting:
self.connecting = True
if self.ws_url:
try:
ws = create_connection(self.ws_url, sslopt=sslopt_ca_certs)
self.hook = w.hook_fd(ws.sock._sock.fileno(), 1, 0, 0, "receive_ws_callback", self.get_team_hash())
ws.sock.setblocking(0)
self.ws = ws
# self.attach_websocket(ws)
self.set_connected()
self.connecting = False
except Exception as e:
dbg("websocket connection error: {}".format(decode_from_utf8(e)))
self.connecting = False
return False
else:
# The fast reconnect failed, so start over-ish
for chan in self.channels:
self.channels[chan].got_history = False
s = SlackRequest(self.token, 'rtm.start', {}, retries=999)
self.eventrouter.receive(s)
self.connecting = False
# del self.eventrouter.teams[self.get_team_hash()]
self.set_reconnect_url(None)
def set_connected(self):
self.connected = True
def set_disconnected(self):
w.unhook(self.hook)
self.connected = False
def set_reconnect_url(self, url):
self.ws_url = url
def next_ws_transaction_id(self):
if self.ws_counter > 999:
self.ws_counter = 0
self.ws_counter += 1
return self.ws_counter
def send_to_websocket(self, data, expect_reply=True):
data["id"] = self.next_ws_transaction_id()
message = json.dumps(data)
try:
if expect_reply:
self.ws_replies[data["id"]] = data
self.ws.send(encode_to_utf8(message))
dbg("Sent {}...".format(message[:100]))
except:
print "WS ERROR"
dbg("Unexpected error: {}\nSent: {}".format(sys.exc_info()[0], data))
self.set_connected()
def update_member_presence(self, user, presence):
user.presence = presence
for c in self.channels:
c = self.channels[c]
if user.id in c.members:
c.update_nicklist(user.id)
class SlackChannel(object):
"""
Represents an individual slack channel.
"""
def __init__(self, eventrouter, **kwargs):
        # We require these two things for a valid object,
# the rest we can just learn from slack
self.active = False
for key, value in kwargs.items():
setattr(self, key, value)
self.members = set(kwargs.get('members', set()))
self.eventrouter = eventrouter
self.slack_name = kwargs["name"]
self.slack_purpose = kwargs.get("purpose", {"value": ""})
self.topic = kwargs.get("topic", {}).get("value", "")
self.identifier = kwargs["id"]
self.last_read = SlackTS(kwargs.get("last_read", SlackTS()))
self.channel_buffer = None
self.team = kwargs.get('team', None)
self.got_history = False
self.messages = {}
self.hashed_messages = {}
self.new_messages = False
self.typing = {}
self.type = 'channel'
self.set_name(self.slack_name)
# short name relates to the localvar we change for typing indication
self.current_short_name = self.name
self.update_nicklist()
self.unread_count_display = 0
def __eq__(self, compare_str):
if compare_str == self.slack_name or compare_str == self.formatted_name() or compare_str == self.formatted_name(style="long_default"):
return True
else:
return False
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def set_name(self, slack_name):
self.name = "#" + slack_name
def refresh(self):
return self.rename()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(typing=self.is_someone_typing(), style="sidebar")
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def get_members(self):
return self.members
def set_unread_count_display(self, count):
self.unread_count_display = count
self.new_messages = bool(self.unread_count_display)
for c in range(self.unread_count_display):
if self.type == "im":
w.buffer_set(self.channel_buffer, "hotlist", "2")
else:
w.buffer_set(self.channel_buffer, "hotlist", "1")
def formatted_name(self, style="default", typing=False, **kwargs):
if typing and config.channel_name_typing_indicator:
prepend = ">"
elif self.type == "group":
prepend = config.group_name_prefix
else:
prepend = "#"
select = {
"default": prepend + self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}{}".format(self.team.preferred_name, prepend, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return select[style]
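    # For example, with a hypothetical team "myteam" and channel "general":
    #   formatted_name()                        -> "#general"
    #   formatted_name(style="long_default")    -> "myteam.#general"
    #   formatted_name(typing=True)             -> ">general"
    # (the last one only when the typing indicator option is enabled)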
def render_topic(self):
if self.channel_buffer:
if self.topic != "":
topic = self.topic
else:
topic = self.slack_purpose['value']
w.buffer_set(self.channel_buffer, "title", topic)
def set_topic(self, value):
self.topic = value
self.render_topic()
def update_from_message_json(self, message_json):
for key, value in message_json.items():
setattr(self, key, value)
def open(self, update_remote=True):
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.create_buffer()
self.active = True
self.get_history()
# self.create_buffer()
def check_should_open(self, force=False):
if hasattr(self, "is_archived") and self.is_archived:
return
if force:
self.create_buffer()
return
# Only check is_member if is_open is not set, because in some cases
# (e.g. group DMs), is_member should be ignored in favor of is_open.
is_open = self.is_open if hasattr(self, "is_open") else self.is_member
if is_open or self.unread_count_display:
self.create_buffer()
if config.background_load_all_history:
self.get_history(slow_queue=True)
def set_related_server(self, team):
self.team = team
def set_highlights(self):
# highlight my own name and any set highlights
if self.channel_buffer:
highlights = self.team.highlight_words.union({'@' + self.team.nick, self.team.myidentifier, "!here", "!channel", "!everyone"})
h_str = ",".join(highlights)
w.buffer_set(self.channel_buffer, "highlight_words", h_str)
def create_buffer(self):
"""
incomplete (muted doesn't work)
Creates the weechat buffer where the channel magic happens.
"""
if not self.channel_buffer:
self.active = True
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
if self.type == "im":
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
else:
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.team.nick)
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
self.render_topic()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
if self.channel_buffer:
# if self.team.server_alias:
# w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.server_alias)
# else:
w.buffer_set(self.channel_buffer, "localvar_set_server", self.team.preferred_name)
# else:
# self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
self.update_nicklist()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if self.type == "im":
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def destroy_buffer(self, update_remote):
if self.channel_buffer is not None:
self.channel_buffer = None
self.messages = {}
self.hashed_messages = {}
self.got_history = False
# if update_remote and not eventrouter.shutting_down:
self.active = False
if update_remote and not self.eventrouter.shutting_down:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["leave"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def buffer_prnt(self, nick, text, timestamp=str(time.time()), tagset=None, tag_nick=None, **kwargs):
data = "{}\t{}".format(nick, text)
ts = SlackTS(timestamp)
last_read = SlackTS(self.last_read)
# without this, DMs won't open automatically
if not self.channel_buffer and ts > last_read:
self.open(update_remote=False)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
            backlog = ts <= last_read
if tagset:
tags = tag(tagset, user=tag_nick)
self.new_messages = True
# we have to infer the tagset because we weren't told
elif ts <= last_read:
tags = tag("backlog", user=tag_nick)
elif self.type in ["im", "mpdm"]:
if nick != self.team.nick:
tags = tag("dm", user=tag_nick)
self.new_messages = True
else:
tags = tag("dmfromme")
else:
tags = tag("default", user=tag_nick)
self.new_messages = True
try:
if config.unhide_buffers_with_activity and not self.is_visible() and (self.identifier not in self.team.muted_channels):
w.buffer_set(self.channel_buffer, "hidden", "0")
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_print_time(self.channel_buffer, ts.minorstr(), ts.major)
if backlog:
self.mark_read(ts, update_remote=False, force=True)
except:
dbg("Problem processing buffer_prnt")
def send_message(self, message, request_dict_ext={}):
# team = self.eventrouter.teams[self.team]
message = linkify_text(message, self.team, self)
dbg(message)
request = {"type": "message", "channel": self.identifier, "text": message, "_team": self.team.team_hash, "user": self.team.myidentifier}
request.update(request_dict_ext)
self.team.send_to_websocket(request)
self.mark_read(update_remote=False, force=True)
def store_message(self, message, team, from_me=False):
if not self.active:
return
if from_me:
message.message_json["user"] = team.myidentifier
self.messages[SlackTS(message.ts)] = message
if len(self.messages.keys()) > SCROLLBACK_SIZE:
mk = self.messages.keys()
mk.sort()
for k in mk[:SCROLLBACK_SIZE]:
msg_to_delete = self.messages[k]
if msg_to_delete.hash:
del self.hashed_messages[msg_to_delete.hash]
del self.messages[k]
def change_message(self, ts, text=None, suffix=None):
ts = SlackTS(ts)
if ts in self.messages:
m = self.messages[ts]
if text:
m.change_text(text)
if suffix:
m.change_suffix(suffix)
text = m.render(force=True)
modify_buffer_line(self.channel_buffer, text, ts.major, ts.minor)
return True
def edit_nth_previous_message(self, n, old, new, flags):
message = self.my_last_message(n)
if new == "" and old == "":
s = SlackRequest(self.team.token, "chat.delete", {"channel": self.identifier, "ts": message['ts']}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
else:
num_replace = 1
if 'g' in flags:
num_replace = 0
new_message = re.sub(old, new, message["text"], num_replace)
if new_message != message["text"]:
s = SlackRequest(self.team.token, "chat.update", {"channel": self.identifier, "ts": message['ts'], "text": new_message}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
def my_last_message(self, msgno):
for message in reversed(self.sorted_message_keys()):
m = self.messages[message]
if "user" in m.message_json and "text" in m.message_json and m.message_json["user"] == self.team.myidentifier:
msgno -= 1
if msgno == 0:
return m.message_json
def is_visible(self):
return w.buffer_get_integer(self.channel_buffer, "hidden") == 0
def get_history(self, slow_queue=False):
if not self.got_history:
# we have probably reconnected. flush the buffer
if self.team.connected:
w.buffer_clear(self.channel_buffer)
self.buffer_prnt('', 'getting channel history...', tagset='backlog')
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["history"], {"channel": self.identifier, "count": BACKLOG_SIZE}, team_hash=self.team.team_hash, channel_identifier=self.identifier, clear=True)
if not slow_queue:
self.eventrouter.receive(s)
else:
self.eventrouter.receive_slow(s)
self.got_history = True
def send_add_reaction(self, msg_number, reaction):
self.send_change_reaction("reactions.add", msg_number, reaction)
def send_remove_reaction(self, msg_number, reaction):
self.send_change_reaction("reactions.remove", msg_number, reaction)
def send_change_reaction(self, method, msg_number, reaction):
if 0 < msg_number < len(self.messages):
timestamp = self.sorted_message_keys()[-msg_number]
data = {"channel": self.identifier, "timestamp": timestamp, "name": reaction}
s = SlackRequest(self.team.token, method, data)
self.eventrouter.receive(s)
def sorted_message_keys(self):
keys = []
for k in self.messages:
if type(self.messages[k]) == SlackMessage:
keys.append(k)
return sorted(keys)
# Typing related
def set_typing(self, user):
if self.channel_buffer and self.is_visible():
self.typing[user] = time.time()
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def unset_typing(self, user):
if self.channel_buffer and self.is_visible():
u = self.typing.get(user, None)
if u:
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
def is_someone_typing(self):
"""
Walks through dict of typing folks in a channel and fast
returns if any of them is actively typing. If none are,
nulls the dict and returns false.
"""
for user, timestamp in self.typing.iteritems():
if timestamp + 4 > time.time():
return True
if len(self.typing) > 0:
self.typing = {}
self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
return False
def get_typing_list(self):
"""
Returns the names of everyone in the channel who is currently typing.
"""
typing = []
        # iterate over a copy, since we may delete entries while looping
        for user, timestamp in self.typing.items():
if timestamp + 4 > time.time():
typing.append(user)
else:
del self.typing[user]
return typing
def mark_read(self, ts=None, update_remote=True, force=False):
if not ts:
ts = SlackTS()
if self.new_messages or force:
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
if update_remote:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["mark"], {"channel": self.identifier, "ts": ts}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.new_messages = False
def user_joined(self, user_id):
# ugly hack - for some reason this gets turned into a list
self.members = set(self.members)
self.members.add(user_id)
self.update_nicklist(user_id)
def user_left(self, user_id):
self.members.discard(user_id)
self.update_nicklist(user_id)
def update_nicklist(self, user=None):
if not self.channel_buffer:
return
if self.type not in ["channel", "group", "mpim"]:
return
w.buffer_set(self.channel_buffer, "nicklist", "1")
# create nicklists for the current channel if they don't exist
# if they do, use the existing pointer
here = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_HERE)
if not here:
here = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_HERE, "weechat.color.nicklist_group", 1)
afk = w.nicklist_search_group(self.channel_buffer, '', NICK_GROUP_AWAY)
if not afk:
afk = w.nicklist_add_group(self.channel_buffer, '', NICK_GROUP_AWAY, "weechat.color.nicklist_group", 1)
if user and len(self.members) < 1000:
user = self.team.users[user]
nick = w.nicklist_search_nick(self.channel_buffer, "", user.slack_name)
# since this is a change just remove it regardless of where it is
w.nicklist_remove_nick(self.channel_buffer, nick)
# now add it back in to whichever..
nick_group = afk
if self.team.is_user_present(user.identifier):
nick_group = here
if user.identifier in self.members:
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
# if we didn't get a user, build a complete list. this is expensive.
else:
if len(self.members) < 1000:
try:
for user in self.members:
user = self.team.users[user]
if user.deleted:
continue
nick_group = afk
if self.team.is_user_present(user.identifier):
nick_group = here
w.nicklist_add_nick(self.channel_buffer, nick_group, user.name, user.color_name, "", "", 1)
except Exception as e:
dbg("DEBUG: {} {} {}".format(self.identifier, self.name, decode_from_utf8(e)))
else:
w.nicklist_remove_all(self.channel_buffer)
for fn in ["1| too", "2| many", "3| users", "4| to", "5| show"]:
w.nicklist_add_group(self.channel_buffer, '', fn, w.color('white'), 1)
def hash_message(self, ts):
ts = SlackTS(ts)
def calc_hash(msg):
return sha.sha(str(msg.ts)).hexdigest()
if ts in self.messages and not self.messages[ts].hash:
message = self.messages[ts]
tshash = calc_hash(message)
hl = 3
shorthash = tshash[:hl]
while any(x.startswith(shorthash) for x in self.hashed_messages):
hl += 1
shorthash = tshash[:hl]
if shorthash[:-1] in self.hashed_messages:
col_msg = self.hashed_messages.pop(shorthash[:-1])
col_new_hash = calc_hash(col_msg)[:hl]
col_msg.hash = col_new_hash
self.hashed_messages[col_new_hash] = col_msg
self.change_message(str(col_msg.ts))
if col_msg.thread_channel:
col_msg.thread_channel.rename()
self.hashed_messages[shorthash] = message
message.hash = shorthash
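    # Sketch of the hashing behaviour (hash values are made up): the first
    # message gets a 3-character prefix of its sha1, e.g. "a1b".  If a later
    # message would collide, the prefix length grows and the colliding
    # message is re-hashed and re-rendered, e.g. "a1b4" vs "a1b7", so every
    # stored message keeps a unique short hash usable in /thread and /reply.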
class SlackDMChannel(SlackChannel):
"""
Subclass of a normal channel for person-to-person communication, which
has some important differences.
"""
def __init__(self, eventrouter, users, **kwargs):
dmuser = kwargs["user"]
kwargs["name"] = users[dmuser].name
super(SlackDMChannel, self).__init__(eventrouter, **kwargs)
self.type = 'im'
self.update_color()
self.set_name(self.slack_name)
def set_name(self, slack_name):
self.name = slack_name
def get_members(self):
return {self.user}
def create_buffer(self):
if not self.channel_buffer:
super(SlackDMChannel, self).create_buffer()
w.buffer_set(self.channel_buffer, "localvar_set_type", 'private')
def update_color(self):
if config.colorize_private_chats:
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
else:
self.color = ""
self.color_name = ""
def formatted_name(self, style="default", typing=False, present=True, enable_color=False, **kwargs):
if config.colorize_private_chats and enable_color:
print_color = self.color
else:
print_color = ""
if not present:
prepend = " "
else:
prepend = "+"
select = {
"default": self.slack_name,
"sidebar": prepend + self.slack_name,
"base": self.slack_name,
"long_default": "{}.{}".format(self.team.preferred_name, self.slack_name),
"long_base": "{}.{}".format(self.team.preferred_name, self.slack_name),
}
return print_color + select[style]
def open(self, update_remote=True):
self.create_buffer()
# self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote:
if "join" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"users": self.user, "return_im": True}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
self.create_buffer()
def rename(self):
if self.channel_buffer:
new_name = self.formatted_name(style="sidebar", present=self.team.is_user_present(self.user), enable_color=config.colorize_private_chats)
if self.current_short_name != new_name:
self.current_short_name = new_name
w.buffer_set(self.channel_buffer, "short_name", new_name)
return True
return False
def refresh(self):
return self.rename()
class SlackGroupChannel(SlackChannel):
"""
A group channel is a private discussion group.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackGroupChannel, self).__init__(eventrouter, **kwargs)
self.type = "group"
self.set_name(self.slack_name)
def set_name(self, slack_name):
self.name = config.group_name_prefix + slack_name
# def formatted_name(self, prepend="#", enable_color=True, basic=False):
# return prepend + self.slack_name
class SlackMPDMChannel(SlackChannel):
"""
An MPDM channel is a special instance of a 'group' channel.
We change the name to look less terrible in weechat.
"""
def __init__(self, eventrouter, **kwargs):
super(SlackMPDMChannel, self).__init__(eventrouter, **kwargs)
n = kwargs.get('name')
self.set_name(n)
self.type = "mpim"
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
if "info" in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"channel": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
if update_remote and 'join' in SLACK_API_TRANSLATOR[self.type]:
s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]['join'], {'users': ','.join(self.members)}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
self.eventrouter.receive(s)
# self.create_buffer()
@staticmethod
def adjust_name(n):
return "|".join("-".join(n.split("-")[1:-1]).split("--"))
def set_name(self, n):
self.name = self.adjust_name(n)
def formatted_name(self, style="default", typing=False, **kwargs):
adjusted_name = self.adjust_name(self.slack_name)
if typing and config.channel_name_typing_indicator:
prepend = ">"
else:
prepend = "@"
select = {
"default": adjusted_name,
"sidebar": prepend + adjusted_name,
"base": adjusted_name,
"long_default": "{}.{}".format(self.team.preferred_name, adjusted_name),
"long_base": "{}.{}".format(self.team.preferred_name, adjusted_name),
}
return select[style]
def rename(self):
pass
class SlackThreadChannel(object):
"""
A thread channel is a virtual channel. We don't inherit from
SlackChannel, because most of how it operates will be different.
"""
def __init__(self, eventrouter, parent_message):
self.eventrouter = eventrouter
self.parent_message = parent_message
self.channel_buffer = None
# self.identifier = ""
# self.name = "#" + kwargs['name']
self.type = "thread"
self.got_history = False
self.label = None
# self.set_name(self.slack_name)
# def set_name(self, slack_name):
# self.name = "#" + slack_name
def formatted_name(self, style="default", **kwargs):
hash_or_ts = self.parent_message.hash or self.parent_message.ts
styles = {
"default": " +{}".format(hash_or_ts),
"long_default": "{}.{}".format(self.parent_message.channel.formatted_name(style="long_default"), hash_or_ts),
"sidebar": " +{}".format(hash_or_ts),
}
return styles[style]
def refresh(self):
self.rename()
def mark_read(self, ts=None, update_remote=True, force=False):
if self.channel_buffer:
w.buffer_set(self.channel_buffer, "unread", "")
w.buffer_set(self.channel_buffer, "hotlist", "-1")
def buffer_prnt(self, nick, text, timestamp, **kwargs):
data = "{}\t{}".format(nick, text)
ts = SlackTS(timestamp)
if self.channel_buffer:
# backlog messages - we will update the read marker as we print these
# backlog = False
# if ts <= SlackTS(self.last_read):
# tags = tag("backlog")
# backlog = True
# elif self.type in ["im", "mpdm"]:
# tags = tag("dm")
# self.new_messages = True
# else:
tags = tag("default")
# self.new_messages = True
w.prnt_date_tags(self.channel_buffer, ts.major, tags, data)
modify_print_time(self.channel_buffer, ts.minorstr(), ts.major)
# if backlog:
# self.mark_read(ts, update_remote=False, force=True)
def get_history(self):
self.got_history = True
for message in self.parent_message.submessages:
# message = SlackMessage(message_json, team, channel)
text = message.render()
# print text
suffix = ''
if 'edited' in message.message_json:
suffix = ' (edited)'
# try:
# channel.unread_count += 1
# except:
# channel.unread_count = 1
self.buffer_prnt(message.sender, text + suffix, message.ts)
def send_message(self, message):
# team = self.eventrouter.teams[self.team]
message = linkify_text(message, self.parent_message.team, self)
dbg(message)
request = {"type": "message", "channel": self.parent_message.channel.identifier, "text": message, "_team": self.parent_message.team.team_hash, "user": self.parent_message.team.myidentifier, "thread_ts": str(self.parent_message.ts)}
self.parent_message.team.send_to_websocket(request)
self.mark_read(update_remote=False, force=True)
def open(self, update_remote=True):
self.create_buffer()
self.active = True
self.get_history()
# if "info" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["info"], {"name": self.identifier}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
# if update_remote:
# if "join" in SLACK_API_TRANSLATOR[self.type]:
# s = SlackRequest(self.team.token, SLACK_API_TRANSLATOR[self.type]["join"], {"name": self.name}, team_hash=self.team.team_hash, channel_identifier=self.identifier)
# self.eventrouter.receive(s)
self.create_buffer()
def rename(self):
if self.channel_buffer and not self.label:
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
def create_buffer(self):
"""
incomplete (muted doesn't work)
Creates the weechat buffer where the thread magic happens.
"""
if not self.channel_buffer:
self.channel_buffer = w.buffer_new(self.formatted_name(style="long_default"), "buffer_input_callback", "EVENTROUTER", "", "")
self.eventrouter.weechat_controller.register_buffer(self.channel_buffer, self)
w.buffer_set(self.channel_buffer, "localvar_set_type", 'channel')
w.buffer_set(self.channel_buffer, "localvar_set_nick", self.parent_message.team.nick)
w.buffer_set(self.channel_buffer, "localvar_set_channel", self.formatted_name())
w.buffer_set(self.channel_buffer, "short_name", self.formatted_name(style="sidebar", enable_color=True))
time_format = w.config_string(w.config_get("weechat.look.buffer_time_format"))
parent_time = time.localtime(SlackTS(self.parent_message.ts).major)
topic = '{} {} | {}'.format(time.strftime(time_format, parent_time), self.parent_message.sender, self.parent_message.render() )
w.buffer_set(self.channel_buffer, "title", topic)
# self.eventrouter.weechat_controller.set_refresh_buffer_list(True)
# try:
# if self.unread_count != 0:
# for c in range(1, self.unread_count):
# if self.type == "im":
# w.buffer_set(self.channel_buffer, "hotlist", "2")
# else:
# w.buffer_set(self.channel_buffer, "hotlist", "1")
# else:
# pass
# #dbg("no unread in {}".format(self.name))
# except:
# pass
# dbg("exception no unread count")
# if self.unread_count != 0 and not self.muted:
# w.buffer_set(self.channel_buffer, "hotlist", "1")
def destroy_buffer(self, update_remote):
if self.channel_buffer is not None:
self.channel_buffer = None
self.got_history = False
# if update_remote and not eventrouter.shutting_down:
self.active = False
class SlackUser(object):
"""
    Represents an individual slack user. Also where you set their name formatting.
"""
def __init__(self, **kwargs):
        # We require these two things for a valid object,
        # the rest we can just learn from slack
self.identifier = kwargs["id"]
self.slack_name = kwargs["name"]
self.name = kwargs["name"]
for key, value in kwargs.items():
setattr(self, key, value)
self.update_color()
def __repr__(self):
return "Name:{} Identifier:{}".format(self.name, self.identifier)
def force_color(self, color_name):
self.color_name = color_name
self.color = w.color(self.color_name)
def update_color(self):
# This will automatically be none/"" if the user has disabled nick
# colourization.
self.color_name = get_nick_color_name(self.name)
self.color = w.color(self.color_name)
def formatted_name(self, prepend="", enable_color=True):
if enable_color:
return self.color + prepend + self.name
else:
return prepend + self.name
class SlackBot(SlackUser):
"""
    Basically the same as a user, but kept as a separate class so bots
    can be identified and handled differently if needed.
"""
def __init__(self, **kwargs):
super(SlackBot, self).__init__(**kwargs)
class SlackMessage(object):
"""
Represents a single slack message and associated context/metadata.
These are modifiable and can be rerendered to change a message,
delete a message, add a reaction, add a thread.
Note: these can't be tied to a SlackUser object because users
can be deleted, so we have to store sender in each one.
"""
def __init__(self, message_json, team, channel, override_sender=None):
self.team = team
self.channel = channel
self.message_json = message_json
self.submessages = []
self.thread_channel = None
self.hash = None
if override_sender:
self.sender = override_sender
self.sender_plain = override_sender
else:
senders = self.get_sender()
self.sender, self.sender_plain = senders[0], senders[1]
self.suffix = ''
self.ts = SlackTS(message_json['ts'])
text = self.message_json.get('text')
if text and text.startswith('_') and text.endswith('_') and 'subtype' not in message_json:
message_json['text'] = text[1:-1]
message_json['subtype'] = 'me_message'
if message_json.get('subtype') == 'me_message' and not message_json['text'].startswith(self.sender):
message_json['text'] = self.sender + ' ' + self.message_json['text']
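        # e.g. an incoming {"text": "_waves_"} from alice is rewritten above to
        # {"text": "alice waves", "subtype": "me_message"} so it renders like
        # an IRC /me action.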
def __hash__(self):
return hash(self.ts)
def render(self, force=False):
if len(self.submessages) > 0:
return "{} {} {}".format(render(self.message_json, self.team, self.channel, force), self.suffix, "{}[ Thread: {} Replies: {} ]".format(w.color(config.thread_suffix_color), self.hash or self.ts, len(self.submessages)))
return "{} {}".format(render(self.message_json, self.team, self.channel, force), self.suffix)
def change_text(self, new_text):
self.message_json["text"] = new_text
dbg(self.message_json)
def change_suffix(self, new_suffix):
self.suffix = new_suffix
dbg(self.message_json)
def get_sender(self):
name = ""
name_plain = ""
if self.message_json.get('bot_id') in self.team.bots:
name = "{} :]".format(self.team.bots[self.message_json["bot_id"]].formatted_name())
name_plain = "{}".format(self.team.bots[self.message_json["bot_id"]].formatted_name(enable_color=False))
elif 'user' in self.message_json:
if self.message_json['user'] == self.team.myidentifier:
name = self.team.users[self.team.myidentifier].name
name_plain = self.team.users[self.team.myidentifier].name
elif self.message_json['user'] in self.team.users:
u = self.team.users[self.message_json['user']]
if u.is_bot:
name = "{} :]".format(u.formatted_name())
else:
name = "{}".format(u.formatted_name())
name_plain = "{}".format(u.formatted_name(enable_color=False))
elif 'username' in self.message_json:
name = "-{}-".format(self.message_json["username"])
name_plain = "{}".format(self.message_json["username"])
elif 'service_name' in self.message_json:
name = "-{}-".format(self.message_json["service_name"])
name_plain = "{}".format(self.message_json["service_name"])
else:
name = ""
name_plain = ""
return (name, name_plain)
def add_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
found = False
for r in m:
if r["name"] == reaction and user not in r["users"]:
r["users"].append(user)
found = True
if not found:
self.message_json["reactions"].append({"name": reaction, "users": [user]})
else:
self.message_json["reactions"] = [{"name": reaction, "users": [user]}]
def remove_reaction(self, reaction, user):
m = self.message_json.get('reactions', None)
if m:
for r in m:
if r["name"] == reaction and user in r["users"]:
r["users"].remove(user)
else:
pass
class SlackThreadMessage(SlackMessage):
def __init__(self, parent_id, *args):
super(SlackThreadMessage, self).__init__(*args)
self.parent_id = parent_id
class WeeSlackMetadata(object):
"""
A simple container that we pickle/unpickle to hold data.
"""
def __init__(self, meta):
self.meta = meta
def jsonify(self):
return self.meta
class SlackTS(object):
def __init__(self, ts=None):
if ts:
self.major, self.minor = [int(x) for x in ts.split('.', 1)]
else:
self.major = int(time.time())
self.minor = 0
def __cmp__(self, other):
if isinstance(other, SlackTS):
if self.major < other.major:
return -1
elif self.major > other.major:
return 1
elif self.major == other.major:
if self.minor < other.minor:
return -1
elif self.minor > other.minor:
return 1
else:
return 0
else:
s = self.__str__()
if s < other:
return -1
elif s > other:
return 1
elif s == other:
return 0
def __hash__(self):
return hash("{}.{}".format(self.major, self.minor))
def __repr__(self):
return str("{0}.{1:06d}".format(self.major, self.minor))
def split(self, *args, **kwargs):
return [self.major, self.minor]
def majorstr(self):
return str(self.major)
def minorstr(self):
return str(self.minor)
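# Illustrative sketch of SlackTS semantics (timestamps are made up):
#
#   ts = SlackTS("1483975018.000007")
#   ts.major, ts.minor                    # -> 1483975018, 7
#   str(ts)                               # -> "1483975018.000007" (minor zero-padded)
#   ts > SlackTS("1483975018.000003")     # compares major first, then minor
#   SlackTS()                             # "now": major = int(time.time()), minor = 0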
###### New handlers
def handle_rtmstart(login_data, eventrouter):
"""
This handles the main entry call to slack, rtm.start
"""
metadata = pickle.loads(login_data["wee_slack_request_metadata"])
if not login_data["ok"]:
w.prnt("", "ERROR: Failed connecting to Slack with token {}: {}"
.format(metadata.token, login_data["error"]))
return
# Let's reuse a team if we have it already.
th = SlackTeam.generate_team_hash(login_data['self']['name'], login_data['team']['domain'])
if not eventrouter.teams.get(th):
users = {}
for item in login_data["users"]:
users[item["id"]] = SlackUser(**item)
bots = {}
for item in login_data["bots"]:
bots[item["id"]] = SlackBot(**item)
channels = {}
for item in login_data["channels"]:
channels[item["id"]] = SlackChannel(eventrouter, **item)
for item in login_data["ims"]:
channels[item["id"]] = SlackDMChannel(eventrouter, users, **item)
for item in login_data["groups"]:
if item["name"].startswith('mpdm-'):
channels[item["id"]] = SlackMPDMChannel(eventrouter, **item)
else:
channels[item["id"]] = SlackGroupChannel(eventrouter, **item)
t = SlackTeam(
eventrouter,
metadata.token,
login_data['url'],
login_data["team"]["domain"],
login_data["self"]["name"],
login_data["self"]["id"],
users,
bots,
channels,
muted_channels=login_data["self"]["prefs"]["muted_channels"],
highlight_words=login_data["self"]["prefs"]["highlight_words"],
)
eventrouter.register_team(t)
else:
t = eventrouter.teams.get(th)
t.set_reconnect_url(login_data['url'])
t.connect()
t.buffer_prnt('Connected to Slack')
t.buffer_prnt('{:<20} {}'.format("Websocket URL", login_data["url"]))
t.buffer_prnt('{:<20} {}'.format("User name", login_data["self"]["name"]))
t.buffer_prnt('{:<20} {}'.format("User ID", login_data["self"]["id"]))
t.buffer_prnt('{:<20} {}'.format("Team name", login_data["team"]["name"]))
t.buffer_prnt('{:<20} {}'.format("Team domain", login_data["team"]["domain"]))
t.buffer_prnt('{:<20} {}'.format("Team id", login_data["team"]["id"]))
dbg("connected to {}".format(t.domain))
def handle_channelsinfo(channel_json, eventrouter, **kwargs):
request_metadata = pickle.loads(channel_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
channel = team.channels[request_metadata.channel_identifier]
unread_count_display = channel_json['channel']['unread_count_display']
channel.set_unread_count_display(unread_count_display)
def handle_groupsinfo(group_json, eventrouter, **kwargs):
request_metadata = pickle.loads(group_json["wee_slack_request_metadata"])
team = eventrouter.teams[request_metadata.team_hash]
group = team.channels[request_metadata.channel_identifier]
unread_count_display = group_json['group']['unread_count_display']
group_id = group_json['group']['id']
group.set_unread_count_display(unread_count_display)
def handle_conversationsopen(conversation_json, eventrouter, object_name='channel', **kwargs):
request_metadata = pickle.loads(conversation_json["wee_slack_request_metadata"])
# Set unread count if the channel isn't new (channel_identifier exists)
if hasattr(request_metadata, 'channel_identifier'):
channel_id = request_metadata.channel_identifier
team = eventrouter.teams[request_metadata.team_hash]
conversation = team.channels[channel_id]
unread_count_display = conversation_json[object_name]['unread_count_display']
conversation.set_unread_count_display(unread_count_display)
def handle_mpimopen(mpim_json, eventrouter, object_name='group', **kwargs):
handle_conversationsopen(mpim_json, eventrouter, object_name, **kwargs)
def handle_groupshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_channelshistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_imhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_mpimhistory(message_json, eventrouter, **kwargs):
handle_history(message_json, eventrouter, **kwargs)
def handle_history(message_json, eventrouter, **kwargs):
request_metadata = pickle.loads(message_json["wee_slack_request_metadata"])
kwargs['team'] = eventrouter.teams[request_metadata.team_hash]
kwargs['channel'] = kwargs['team'].channels[request_metadata.channel_identifier]
try:
clear = request_metadata.clear
except:
clear = False
dbg(clear)
kwargs['output_type'] = "backlog"
if clear:
w.buffer_clear(kwargs['channel'].channel_buffer)
for message in reversed(message_json["messages"]):
process_message(message, eventrouter, **kwargs)
###### New/converted process_ and subprocess_ methods
def process_reconnect_url(message_json, eventrouter, **kwargs):
kwargs['team'].set_reconnect_url(message_json['url'])
def process_manual_presence_change(message_json, eventrouter, **kwargs):
process_presence_change(message_json, eventrouter, **kwargs)
def process_presence_change(message_json, eventrouter, **kwargs):
if "user" in kwargs:
user = kwargs["user"]
team = kwargs["team"]
team.update_member_presence(user, message_json["presence"])
def process_pref_change(message_json, eventrouter, **kwargs):
team = kwargs["team"]
if message_json['name'] == 'muted_channels':
team.set_muted_channels(message_json['value'])
elif message_json['name'] == 'highlight_words':
team.set_highlight_words(message_json['value'])
else:
dbg("Preference change not implemented: {}\n".format(message_json['name']))
def process_user_typing(message_json, eventrouter, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
if channel:
channel.set_typing(team.users.get(message_json["user"]).name)
w.bar_item_update("slack_typing_notice")
def process_team_join(message_json, eventrouter, **kwargs):
user = message_json['user']
team = kwargs["team"]
team.users[user["id"]] = SlackUser(**user)
def process_pong(message_json, eventrouter, **kwargs):
pass
def process_message(message_json, eventrouter, store=True, **kwargs):
channel = kwargs["channel"]
team = kwargs["team"]
# try:
# send these subtype messages elsewhere
known_subtypes = [
'thread_message',
'message_replied',
'message_changed',
'message_deleted',
'channel_join',
'channel_leave',
'channel_topic',
# 'group_join',
# 'group_leave',
]
if "thread_ts" in message_json and "reply_count" not in message_json:
message_json["subtype"] = "thread_message"
subtype = message_json.get("subtype", None)
if subtype and subtype in known_subtypes:
f = eval('subprocess_' + subtype)
f(message_json, eventrouter, channel, team)
else:
message = SlackMessage(message_json, team, channel)
text = message.render()
dbg("Rendered message: %s" % text)
dbg("Sender: %s (%s)" % (message.sender, message.sender_plain))
# Handle actions (/me).
# We don't use `subtype` here because creating the SlackMessage may
# have changed the subtype based on the detected message contents.
if message.message_json.get('subtype') == 'me_message':
try:
channel.unread_count_display += 1
except:
channel.unread_count_display = 1
channel.buffer_prnt(w.prefix("action").rstrip(), text, message.ts, tag_nick=message.sender_plain, **kwargs)
else:
suffix = ''
if 'edited' in message_json:
suffix = ' (edited)'
try:
channel.unread_count_display += 1
except:
channel.unread_count_display = 1
channel.buffer_prnt(message.sender, text + suffix, message.ts, tag_nick=message.sender_plain, **kwargs)
if store:
channel.store_message(message, team)
dbg("NORMAL REPLY {}".format(message_json))
# except:
# channel.buffer_prnt("WEE-SLACK-ERROR", json.dumps(message_json), message_json["ts"], **kwargs)
# traceback.print_exc()
def subprocess_thread_message(message_json, eventrouter, channel, team):
# print ("THREADED: " + str(message_json))
parent_ts = message_json.get('thread_ts', None)
if parent_ts:
parent_message = channel.messages.get(SlackTS(parent_ts), None)
if parent_message:
message = SlackThreadMessage(parent_ts, message_json, team, channel)
parent_message.submessages.append(message)
channel.hash_message(parent_ts)
channel.store_message(message, team)
channel.change_message(parent_ts)
text = message.render()
# channel.buffer_prnt(message.sender, text, message.ts, **kwargs)
if parent_message.thread_channel:
parent_message.thread_channel.buffer_prnt(message.sender, text, message.ts)
# channel = channels.find(message_json["channel"])
# server = channel.server
# #threadinfo = channel.get_message(message_json["thread_ts"])
# message = Message(message_json, server=server, channel=channel)
# dbg(message, main_buffer=True)
#
# orig = channel.get_message(message_json['thread_ts'])
# if orig[0]:
# channel.get_message(message_json['thread_ts'])[2].add_thread_message(message)
# else:
# dbg("COULDN'T find orig message {}".format(message_json['thread_ts']), main_buffer=True)
# if threadinfo[0]:
# channel.messages[threadinfo[1]].become_thread()
# message_json["item"]["ts"], message_json)
# channel.change_message(message_json["thread_ts"], None, message_json["text"])
# channel.become_thread(message_json["item"]["ts"], message_json)
def subprocess_channel_join(message_json, eventrouter, channel, team):
joinprefix = w.prefix("join")
message = SlackMessage(message_json, team, channel, override_sender=joinprefix)
channel.buffer_prnt(joinprefix, message.render(), message_json["ts"], tagset='joinleave')
channel.user_joined(message_json['user'])
def subprocess_channel_leave(message_json, eventrouter, channel, team):
leaveprefix = w.prefix("quit")
message = SlackMessage(message_json, team, channel, override_sender=leaveprefix)
channel.buffer_prnt(leaveprefix, message.render(), message_json["ts"], tagset='joinleave')
channel.user_left(message_json['user'])
# channel.update_nicklist(message_json['user'])
# channel.update_nicklist()
def subprocess_message_replied(message_json, eventrouter, channel, team):
pass
def subprocess_message_changed(message_json, eventrouter, channel, team):
m = message_json.get("message", None)
if m:
new_message = m
# message = SlackMessage(new_message, team, channel)
if "attachments" in m:
message_json["attachments"] = m["attachments"]
if "text" in m:
if "text" in message_json:
message_json["text"] += m["text"]
dbg("added text!")
else:
message_json["text"] = m["text"]
if "fallback" in m:
if "fallback" in message_json:
message_json["fallback"] += m["fallback"]
else:
message_json["fallback"] = m["fallback"]
new_message["text"] += unwrap_attachments(message_json, new_message["text"])
if "edited" in new_message:
channel.change_message(new_message["ts"], new_message["text"], ' (edited)')
else:
channel.change_message(new_message["ts"], new_message["text"])
def subprocess_message_deleted(message_json, eventrouter, channel, team):
channel.change_message(message_json["deleted_ts"], "(deleted)", '')
def subprocess_channel_topic(message_json, eventrouter, channel, team):
text = unhtmlescape(unfurl_refs(message_json["text"], ignore_alt_text=False))
channel.buffer_prnt(w.prefix("network").rstrip(), text, message_json["ts"], tagset="muted")
channel.set_topic(unhtmlescape(message_json["topic"]))
def process_reply(message_json, eventrouter, **kwargs):
dbg('processing reply')
team = kwargs["team"]
identifier = message_json["reply_to"]
try:
original_message_json = team.ws_replies[identifier]
del team.ws_replies[identifier]
if "ts" in message_json:
original_message_json["ts"] = message_json["ts"]
else:
dbg("no reply ts {}".format(message_json))
c = original_message_json.get('channel', None)
channel = team.channels[c]
m = SlackMessage(original_message_json, team, channel)
# if "type" in message_json:
# if message_json["type"] == "message" and "channel" in message_json.keys():
# message_json["ts"] = message_json["ts"]
# channels.find(message_json["channel"]).store_message(m, from_me=True)
# channels.find(message_json["channel"]).buffer_prnt(server.nick, m.render(), m.ts)
process_message(m.message_json, eventrouter, channel=channel, team=team)
channel.mark_read(update_remote=True, force=True)
dbg("REPLY {}".format(message_json))
except KeyError:
dbg("Unexpected reply {}".format(message_json))
def process_channel_marked(message_json, eventrouter, **kwargs):
"""
complete
"""
channel = kwargs["channel"]
ts = message_json.get("ts", None)
if ts:
channel.mark_read(ts=ts, force=True, update_remote=False)
else:
dbg("tried to mark something weird {}".format(message_json))
def process_group_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_im_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_mpim_marked(message_json, eventrouter, **kwargs):
process_channel_marked(message_json, eventrouter, **kwargs)
def process_channel_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
kwargs['team'].channels[item["id"]].update_from_message_json(item)
kwargs['team'].channels[item["id"]].open()
def process_channel_created(message_json, eventrouter, **kwargs):
item = message_json["channel"]
c = SlackChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].buffer_prnt('Channel created: {}'.format(c.slack_name))
def process_channel_rename(message_json, eventrouter, **kwargs):
item = message_json["channel"]
channel = kwargs['team'].channels[item["id"]]
channel.slack_name = message_json['channel']['name']
def process_im_created(message_json, eventrouter, **kwargs):
team = kwargs['team']
item = message_json["channel"]
c = SlackDMChannel(eventrouter, team=team, users=team.users, **item)
team.channels[item["id"]] = c
kwargs['team'].buffer_prnt('IM channel created: {}'.format(c.name))
def process_im_open(message_json, eventrouter, **kwargs):
channel = kwargs['channel']
item = message_json
kwargs['team'].channels[item["channel"]].check_should_open(True)
w.buffer_set(channel.channel_buffer, "hotlist", "2")
def process_im_close(message_json, eventrouter, **kwargs):
item = message_json
cbuf = kwargs['team'].channels[item["channel"]].channel_buffer
eventrouter.weechat_controller.unregister_buffer(cbuf, False, True)
def process_group_joined(message_json, eventrouter, **kwargs):
item = message_json["channel"]
if item["name"].startswith("mpdm-"):
c = SlackMPDMChannel(eventrouter, team=kwargs["team"], **item)
else:
c = SlackGroupChannel(eventrouter, team=kwargs["team"], **item)
kwargs['team'].channels[item["id"]] = c
kwargs['team'].channels[item["id"]].open()
def process_reaction_added(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels[message_json["item"]["channel"]]
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.add_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("reaction to item type not supported: " + str(message_json))
def process_reaction_removed(message_json, eventrouter, **kwargs):
channel = kwargs['team'].channels[message_json["item"]["channel"]]
if message_json["item"].get("type") == "message":
ts = SlackTS(message_json['item']["ts"])
message = channel.messages.get(ts, None)
if message:
message.remove_reaction(message_json["reaction"], message_json["user"])
channel.change_message(ts)
else:
dbg("Reaction to item type not supported: " + str(message_json))
###### New module/global methods
def render_formatting(text):
text = re.sub(r'(^| )\*([^*]+)\*([^a-zA-Z0-9_]|$)',
r'\1{}\2{}\3'.format(w.color(config.render_bold_as),
w.color('-' + config.render_bold_as)),
text)
text = re.sub(r'(^| )_([^_]+)_([^a-zA-Z0-9_]|$)',
r'\1{}\2{}\3'.format(w.color(config.render_italic_as),
w.color('-' + config.render_italic_as)),
text)
return text
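# For example, render_formatting("this is *bold* and _italic_") returns the
# same sentence with the starred/underscored words wrapped in the weechat
# colour codes configured by render_bold_as / render_italic_as (described here
# symbolically, since the real output contains raw escape sequences).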
def render(message_json, team, channel, force=False):
# If we already have a rendered version in the object, just return that.
if not force and message_json.get("_rendered_text", ""):
return message_json["_rendered_text"]
else:
# server = servers.find(message_json["_server"])
if "fallback" in message_json:
text = message_json["fallback"]
elif "text" in message_json:
if message_json['text'] is not None:
text = message_json["text"]
else:
text = ""
else:
text = ""
text = unfurl_refs(text, ignore_alt_text=config.unfurl_ignore_alt_text)
text += unfurl_refs(unwrap_attachments(message_json, text), ignore_alt_text=config.unfurl_ignore_alt_text)
text = text.lstrip()
text = unhtmlescape(text.replace("\t", " "))
if message_json.get('mrkdwn', True):
text = render_formatting(text)
# if self.threads:
# text += " [Replies: {} Thread ID: {} ] ".format(len(self.threads), self.thread_id)
# #for thread in self.threads:
text += create_reaction_string(message_json.get("reactions", ""))
message_json["_rendered_text"] = text
return text
def linkify_text(message, team, channel):
    # The get_username_map function is a bit heavy, but this whole
    # function is only called on message send.
usernames = team.get_username_map()
channels = team.get_channel_map()
message = (message
# Replace IRC formatting chars with Slack formatting chars.
.replace('\x02', '*')
.replace('\x1D', '_')
.replace('\x1F', config.map_underline_to)
# Escape chars that have special meaning to Slack. Note that we do not
# (and should not) perform full HTML entity-encoding here.
# See https://api.slack.com/docs/message-formatting for details.
.replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.split(' '))
for item in enumerate(message):
targets = re.match('^\s*([@#])([\w.-]+[\w. -])(\W*)', item[1])
if targets and targets.groups()[0] == '@':
named = targets.groups()
if named[1] in ["group", "channel", "here"]:
message[item[0]] = "<!{}>".format(named[1])
else:
try:
if usernames[named[1]]:
message[item[0]] = "<@{}>{}".format(usernames[named[1]], named[2])
except:
message[item[0]] = "@{}{}".format(named[1], named[2])
if targets and targets.groups()[0] == '#':
named = targets.groups()
try:
if channels[named[1]]:
message[item[0]] = "<#{}|{}>{}".format(channels[named[1]], named[1], named[2])
except:
message[item[0]] = "#{}{}".format(named[1], named[2])
# dbg(message)
return " ".join(message)
def unfurl_refs(text, ignore_alt_text=False):
"""
input : <@U096Q7CQM|someuser> has joined the channel
    output : someuser has joined the channel
"""
# Find all strings enclosed by <>
# - <https://example.com|example with spaces>
# - <#C2147483705|#otherchannel>
# - <@U2147483697|@othernick>
    # Test patterns live in ./_pytest/test_unfurl.py
matches = re.findall(r"(<[@#]?(?:[^>]*)>)", text)
for m in matches:
# Replace them with human readable strings
text = text.replace(m, unfurl_ref(m[1:-1], ignore_alt_text))
return text
def unfurl_ref(ref, ignore_alt_text=False):
id = ref.split('|')[0]
display_text = ref
if ref.find('|') > -1:
if ignore_alt_text:
display_text = resolve_ref(id)
else:
if id.startswith("#C"):
display_text = "#{}".format(ref.split('|')[1])
elif id.startswith("@U"):
display_text = ref.split('|')[1]
else:
url, desc = ref.split('|', 1)
display_text = "{} ({})".format(url, desc)
else:
display_text = resolve_ref(ref)
return display_text
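# Examples of unfurl_ref behaviour (ids are made up):
#   unfurl_ref("#C2147483705|general")        -> "#general"
#   unfurl_ref("@U2147483697|alice")          -> "alice"
#   unfurl_ref("https://example.com|a site")  -> "https://example.com (a site)"
#   unfurl_ref("@U2147483697")                -> whatever resolve_ref() finds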
def unhtmlescape(text):
return text.replace("<", "<") \
.replace(">", ">") \
.replace("&", "&")
def unwrap_attachments(message_json, text_before):
attachment_text = ''
a = message_json.get("attachments", None)
if a:
if text_before:
attachment_text = '\n'
for attachment in a:
# Attachments should be rendered roughly like:
#
# $pretext
# $author: (if rest of line is non-empty) $title ($title_link) OR $from_url
# $author: (if no $author on previous line) $text
# $fields
t = []
prepend_title_text = ''
if 'author_name' in attachment:
prepend_title_text = attachment['author_name'] + ": "
if 'pretext' in attachment:
t.append(attachment['pretext'])
title = attachment.get('title', None)
title_link = attachment.get('title_link', '')
if title_link in text_before:
title_link = ''
if title and title_link:
t.append('%s%s (%s)' % (prepend_title_text, title, title_link,))
prepend_title_text = ''
elif title and not title_link:
t.append('%s%s' % (prepend_title_text, title,))
prepend_title_text = ''
from_url = attachment.get('from_url', '')
if from_url not in text_before:
t.append(from_url)
atext = attachment.get("text", None)
if atext:
tx = re.sub(r' *\n[\n ]+', '\n', atext)
t.append(prepend_title_text + tx)
prepend_title_text = ''
fields = attachment.get("fields", None)
if fields:
for f in fields:
if f['title'] != '':
t.append('%s %s' % (f['title'], f['value'],))
else:
t.append(f['value'])
fallback = attachment.get("fallback", None)
if t == [] and fallback:
t.append(fallback)
attachment_text += "\n".join([x.strip() for x in t if x])
return attachment_text
def resolve_ref(ref):
# TODO: This hack to use eventrouter needs to go
# this resolver should probably move to the slackteam or eventrouter itself
# global EVENTROUTER
if 'EVENTROUTER' in globals():
e = EVENTROUTER
if ref.startswith('@U') or ref.startswith('@W'):
for t in e.teams.keys():
if ref[1:] in e.teams[t].users:
# try:
return "@{}".format(e.teams[t].users[ref[1:]].name)
# except:
# dbg("NAME: {}".format(ref))
elif ref.startswith('#C'):
for t in e.teams.keys():
if ref[1:] in e.teams[t].channels:
# try:
return "{}".format(e.teams[t].channels[ref[1:]].name)
# except:
# dbg("CHANNEL: {}".format(ref))
# Something else, just return as-is
return ref
def create_reaction_string(reactions):
count = 0
if not isinstance(reactions, list):
reaction_string = " [{}]".format(reactions)
else:
reaction_string = ' ['
for r in reactions:
if len(r["users"]) > 0:
count += 1
if config.show_reaction_nicks:
nicks = [resolve_ref("@{}".format(user)) for user in r["users"]]
users = "({})".format(",".join(nicks))
else:
users = len(r["users"])
reaction_string += ":{}:{} ".format(r["name"], users)
reaction_string = reaction_string[:-1] + ']'
if count == 0:
reaction_string = ''
return reaction_string
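# For example, with show_reaction_nicks disabled and made-up user ids:
#   create_reaction_string([{"name": "+1", "users": ["U111", "U222"]}])  # -> ' [:+1:2]'
#   create_reaction_string([])                                           # -> ''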
def modify_buffer_line(buffer, new_line, timestamp, time_id):
# get a pointer to this buffer's lines
own_lines = w.hdata_pointer(w.hdata_get('buffer'), buffer, 'own_lines')
if own_lines:
# get a pointer to the last line
line_pointer = w.hdata_pointer(w.hdata_get('lines'), own_lines, 'last_line')
# hold the structure of a line and of line data
struct_hdata_line = w.hdata_get('line')
struct_hdata_line_data = w.hdata_get('line_data')
# keep track of the number of lines with the matching time and id
number_of_matching_lines = 0
while line_pointer:
# get a pointer to the data in line_pointer via layout of struct_hdata_line
data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
if data:
line_timestamp = w.hdata_time(struct_hdata_line_data, data, 'date')
line_time_id = w.hdata_integer(struct_hdata_line_data, data, 'date_printed')
# prefix = w.hdata_string(struct_hdata_line_data, data, 'prefix')
if timestamp == int(line_timestamp) and int(time_id) == line_time_id:
number_of_matching_lines += 1
elif number_of_matching_lines > 0:
# since number_of_matching_lines is non-zero, we have
# already reached the message and can stop traversing
break
else:
dbg(('Encountered line without any data while trying to modify '
'line. This is not handled, so aborting modification.'))
return w.WEECHAT_RC_ERROR
# move backwards one line and try again - exit the while if you hit the end
line_pointer = w.hdata_move(struct_hdata_line, line_pointer, -1)
# split the message into at most the number of existing lines
lines = new_line.split('\n', number_of_matching_lines - 1)
# updating a line with a string containing newlines causes the lines to
# be broken when viewed in bare display mode
lines = [line.replace('\n', ' | ') for line in lines]
# pad the list with empty strings until the number of elements equals
# number_of_matching_lines
lines += [''] * (number_of_matching_lines - len(lines))
if line_pointer:
for line in lines:
line_pointer = w.hdata_move(struct_hdata_line, line_pointer, 1)
data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
w.hdata_update(struct_hdata_line_data, data, {"message": line})
return w.WEECHAT_RC_OK
def modify_print_time(buffer, new_id, time):
"""
    This overloads the date_printed field to let us store the per-message
    unique id that comes after the "." in a slack ts.
"""
# get a pointer to this buffer's lines
own_lines = w.hdata_pointer(w.hdata_get('buffer'), buffer, 'own_lines')
if own_lines:
# get a pointer to the last line
line_pointer = w.hdata_pointer(w.hdata_get('lines'), own_lines, 'last_line')
# hold the structure of a line and of line data
struct_hdata_line = w.hdata_get('line')
struct_hdata_line_data = w.hdata_get('line_data')
prefix = ''
while not prefix and line_pointer:
# get a pointer to the data in line_pointer via layout of struct_hdata_line
data = w.hdata_pointer(struct_hdata_line, line_pointer, 'data')
if data:
prefix = w.hdata_string(struct_hdata_line_data, data, 'prefix')
w.hdata_update(struct_hdata_line_data, data, {"date_printed": new_id})
else:
dbg('Encountered line without any data while setting message id.')
return w.WEECHAT_RC_ERROR
# move backwards one line and repeat, so all the lines of the message are set
# exit when you reach a prefix, which means you have reached the
# first line of the message, or if you hit the end
line_pointer = w.hdata_move(struct_hdata_line, line_pointer, -1)
return w.WEECHAT_RC_OK
def tag(tagset, user=None):
    if user:
        user = user.replace(" ", "_")
        default_tag = "nick_" + user
else:
default_tag = 'nick_unknown'
tagsets = {
# messages in the team/server buffer, e.g. "new channel created"
"team": "irc_notice,notify_private,log3",
# when replaying something old
"backlog": "irc_privmsg,no_highlight,notify_none,logger_backlog",
# when posting messages to a muted channel
"muted": "irc_privmsg,no_highlight,notify_none,log1",
# when receiving a direct message
"dm": "irc_privmsg,notify_private,log1",
"dmfromme": "irc_privmsg,no_highlight,notify_none,log1",
# when this is a join/leave, attach for smart filter ala:
# if user in [x.strip() for x in w.prefix("join"), w.prefix("quit")]
"joinleave": "irc_smart_filter,no_highlight,log4",
# catchall ?
"default": "irc_privmsg,notify_message,log1",
}
return default_tag + "," + tagsets[tagset]
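# For example:
#   tag("default", user="alice")  # -> "nick_alice,irc_privmsg,notify_message,log1"
#   tag("backlog")                # -> "nick_unknown,irc_privmsg,no_highlight,notify_none,logger_backlog"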
###### New/converted command_ commands
@slack_buffer_or_ignore
@utf8_decode
def part_command_cb(data, current_buffer, args):
e = EVENTROUTER
args = args.split()
if len(args) > 1:
team = e.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
channel = "".join(args[1:])
if channel in cmap:
buffer_ptr = team.channels[cmap[channel]].channel_buffer
e.weechat_controller.unregister_buffer(buffer_ptr, update_remote=True, close_buffer=True)
else:
e.weechat_controller.unregister_buffer(current_buffer, update_remote=True, close_buffer=True)
return w.WEECHAT_RC_OK_EAT
def parse_topic_command(command):
args = command.split()[1:]
channel_name = None
topic = None
if args:
if args[0].startswith('#'):
channel_name = args[0][1:]
topic = args[1:]
else:
topic = args
if topic == []:
topic = None
if topic:
topic = ' '.join(topic)
if topic == '-delete':
topic = ''
return channel_name, topic
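# Examples of how the /topic arguments are parsed:
#   parse_topic_command("/topic #general new topic")  -> ("general", "new topic")
#   parse_topic_command("/topic new topic")           -> (None, "new topic")
#   parse_topic_command("/topic #general -delete")    -> ("general", "")
#   parse_topic_command("/topic")                     -> (None, None)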
@slack_buffer_or_ignore
@utf8_decode
def topic_command_cb(data, current_buffer, command):
"""
Change the topic of a channel
/topic [<channel>] [<topic>|-delete]
"""
channel_name, topic = parse_topic_command(command)
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if channel_name:
channel = team.channels.get(team.get_channel_map().get(channel_name))
else:
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if not channel:
w.prnt(team.channel_buffer, "#{}: No such channel".format(channel_name))
return w.WEECHAT_RC_OK_EAT
if topic is None:
w.prnt(channel.channel_buffer, 'Topic for {} is "{}"'.format(channel.name, channel.topic))
else:
s = SlackRequest(team.token, "channels.setTopic", {"channel": channel.identifier, "topic": topic}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def me_command_cb(data, current_buffer, args):
message = "_{}_".format(args.split(' ', 1)[1])
buffer_input_callback("EVENTROUTER", current_buffer, message)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def msg_command_cb(data, current_buffer, args):
dbg("msg_command_cb")
aargs = args.split(None, 2)
who = aargs[1]
if who == "*":
who = EVENTROUTER.weechat_controller.buffers[current_buffer].slack_name
else:
        # command_talk expects the full command string, so prepend the verb
        command_talk(data, current_buffer, "talk " + who)
if len(aargs) > 2:
message = aargs[2]
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
cmap = team.get_channel_map()
if who in cmap:
channel = team.channels[cmap[who]]
channel.send_message(message)
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_channels(data, current_buffer, args):
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Channels:")
for channel in team.get_channel_map():
team.buffer_prnt(" {}".format(channel))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def command_users(data, current_buffer, args):
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
team.buffer_prnt("Users:")
for user in team.users.values():
team.buffer_prnt(" {:<25}({})".format(user.name, user.presence))
return w.WEECHAT_RC_OK_EAT
@slack_buffer_or_ignore
@utf8_decode
def command_talk(data, current_buffer, args):
"""
Open a chat with the specified user(s)
/slack talk <user>[,<user2>[,<user3>...]]
"""
e = EVENTROUTER
team = e.weechat_controller.buffers[current_buffer].team
channel_name = args.split(' ')[1]
if channel_name.startswith('#'):
channel_name = channel_name[1:]
# Try finding the channel by name
chan = team.channels.get(team.get_channel_map().get(channel_name))
# If the channel doesn't exist, try finding a DM or MPDM instead
if not chan:
# Get the IDs of the users
u = team.get_username_map()
users = set()
for user in channel_name.split(','):
if user.startswith('@'):
user = user[1:]
if user in u:
users.add(u[user])
if users:
if len(users) > 1:
channel_type = 'mpim'
# Add the current user since MPDMs include them as a member
users.add(team.myidentifier)
else:
channel_type = 'im'
# Try finding the channel by type and members
for channel in team.channels.itervalues():
if (channel.type == channel_type and
channel.get_members() == users):
chan = channel
break
# If the DM or MPDM doesn't exist, create it
if not chan:
s = SlackRequest(team.token, SLACK_API_TRANSLATOR[channel_type]['join'], {'users': ','.join(users)}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
if chan:
chan.open()
if config.switch_buffer_on_join:
w.buffer_set(chan.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
return w.WEECHAT_RC_OK_EAT
def command_showmuted(data, current_buffer, args):
current = w.current_buffer()
w.prnt(EVENTROUTER.weechat_controller.buffers[current].team.channel_buffer, str(EVENTROUTER.weechat_controller.buffers[current].team.muted_channels))
@utf8_decode
def thread_command_callback(data, current_buffer, args):
current = w.current_buffer()
channel = EVENTROUTER.weechat_controller.buffers.get(current)
if channel:
args = args.split()
if args[0] == '/thread':
if len(args) == 2:
try:
pm = channel.messages[SlackTS(args[1])]
except:
pm = channel.hashed_messages[args[1]]
tc = SlackThreadChannel(EVENTROUTER, pm)
pm.thread_channel = tc
tc.open()
# tc.create_buffer()
if config.switch_buffer_on_join:
w.buffer_set(tc.channel_buffer, "display", "1")
return w.WEECHAT_RC_OK_EAT
elif args[0] == '/reply':
count = int(args[1])
msg = " ".join(args[2:])
mkeys = channel.sorted_message_keys()
mkeys.reverse()
parent_id = str(mkeys[count - 1])
channel.send_message(msg, request_dict_ext={"thread_ts": parent_id})
return w.WEECHAT_RC_OK_EAT
w.prnt(current, "Invalid thread command.")
return w.WEECHAT_RC_OK_EAT
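# Usage sketch for the callback above (the timestamp and hash are hypothetical):
#   /thread 1482960137.003543   opens a thread buffer for that message (a short hash such as af8 also works)
#   /reply 1 thanks!            sends "thanks!" as a threaded reply to the most recent message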
@utf8_decode
def rehistory_command_callback(data, current_buffer, args):
current = w.current_buffer()
channel = EVENTROUTER.weechat_controller.buffers.get(current)
channel.got_history = False
w.buffer_clear(channel.channel_buffer)
channel.get_history()
return w.WEECHAT_RC_OK_EAT
@slack_buffer_required
@utf8_decode
def hide_command_callback(data, current_buffer, args):
c = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if c:
name = c.formatted_name(style='long_default')
if name in config.distracting_channels:
w.buffer_set(c.channel_buffer, "hidden", "1")
return w.WEECHAT_RC_OK_EAT
@utf8_decode
def slack_command_cb(data, current_buffer, args):
a = args.split(' ', 1)
    # the command name is the first token; the full argument string is passed
    # through unchanged to the handler
    function_name = a[0]
try:
EVENTROUTER.cmds[function_name]("", current_buffer, args)
except KeyError:
w.prnt("", "Command not found: " + function_name)
return w.WEECHAT_RC_OK
@slack_buffer_required
def command_distracting(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer, None)
if channel:
fullname = channel.formatted_name(style="long_default")
if config.distracting_channels.count(fullname) == 0:
config.distracting_channels.append(fullname)
else:
config.distracting_channels.pop(config.distracting_channels.index(fullname))
save_distracting_channels()
def save_distracting_channels():
w.config_set_plugin('distracting_channels', ','.join(config.distracting_channels))
@slack_buffer_required
def command_slash(data, current_buffer, args):
"""
Support for custom slack commands
/slack slash /customcommand arg1 arg2 arg3
"""
e = EVENTROUTER
channel = e.weechat_controller.buffers.get(current_buffer, None)
if channel:
team = channel.team
if args is None:
server.buffer_prnt("Usage: /slack slash /someslashcommand [arguments...].")
return
split_args = args.split(None, 2)
command = split_args[1]
text = split_args[2] if len(split_args) > 2 else ""
s = SlackRequest(team.token, "chat.command", {"command": command, "text": text, 'channel': channel.identifier}, team_hash=team.team_hash, channel_identifier=channel.identifier)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_mute(data, current_buffer, args):
current = w.current_buffer()
channel_id = EVENTROUTER.weechat_controller.buffers[current].identifier
team = EVENTROUTER.weechat_controller.buffers[current].team
if channel_id not in team.muted_channels:
team.muted_channels.add(channel_id)
else:
team.muted_channels.discard(channel_id)
s = SlackRequest(team.token, "users.prefs.set", {"name": "muted_channels", "value": ",".join(team.muted_channels)}, team_hash=team.team_hash, channel_identifier=channel_id)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_openweb(data, current_buffer, args):
# if done from server buffer, open slack for reals
channel = EVENTROUTER.weechat_controller.buffers[current_buffer]
if isinstance(channel, SlackTeam):
url = "https://{}".format(channel.team.domain)
else:
now = SlackTS()
url = "https://{}/archives/{}/p{}000000".format(channel.team.domain, channel.slack_name, now.majorstr())
w.prnt_date_tags(channel.team.channel_buffer, SlackTS().major, "openweb,logger_backlog_end,notify_none", url)
def command_nodistractions(data, current_buffer, args):
global hide_distractions
hide_distractions = not hide_distractions
if config.distracting_channels != ['']:
for channel in config.distracting_channels:
dbg('hiding channel {}'.format(channel))
# try:
for c in EVENTROUTER.weechat_controller.buffers.itervalues():
if c == channel:
dbg('found channel {} to hide'.format(channel))
w.buffer_set(c.channel_buffer, "hidden", str(int(hide_distractions)))
# except:
# dbg("Can't hide channel {} .. removing..".format(channel), main_buffer=True)
# config.distracting_channels.pop(config.distracting_channels.index(channel))
# save_distracting_channels()
@slack_buffer_required
def command_upload(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
url = 'https://slack.com/api/files.upload'
fname = args.split(' ', 1)
file_path = os.path.expanduser(fname[1])
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
if ' ' in file_path:
file_path = file_path.replace(' ', '\ ')
command = 'curl -F file=@{} -F channels={} -F token={} {}'.format(file_path, channel.identifier, team.token, url)
w.hook_process(command, config.slack_timeout, '', '')
@utf8_decode
def away_command_cb(data, current_buffer, args):
# TODO: reimplement all.. maybe
    (all, message) = re.match(r"^/away(?:\s+(-all))?(?:\s+(.+))?", args).groups()
if message is None:
command_back(data, current_buffer, args)
else:
command_away(data, current_buffer, args)
return w.WEECHAT_RC_OK
@slack_buffer_required
def command_away(data, current_buffer, args):
"""
Sets your status as 'away'
/slack away
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "presence.set", {"presence": "away"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_status(data, current_buffer, args):
"""
Lets you set your Slack Status (not to be confused with away/here)
/slack status [emoji] [status_message]
"""
e = EVENTROUTER
channel = e.weechat_controller.buffers.get(current_buffer, None)
if channel:
team = channel.team
if args is None:
server.buffer_prnt("Usage: /slack status [status emoji] [status text].")
return
split_args = args.split(None, 2)
emoji = split_args[1] if len(split_args) > 1 else ""
text = split_args[2] if len(split_args) > 2 else ""
profile = {"status_text":text,"status_emoji":emoji}
s = SlackRequest(team.token, "users.profile.set", {"profile": profile}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
@slack_buffer_required
def command_back(data, current_buffer, args):
"""
Sets your status as 'back'
/slack back
"""
team = EVENTROUTER.weechat_controller.buffers[current_buffer].team
s = SlackRequest(team.token, "presence.set", {"presence": "active"}, team_hash=team.team_hash)
EVENTROUTER.receive(s)
@slack_buffer_required
@utf8_decode
def label_command_cb(data, current_buffer, args):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
if channel and channel.type == 'thread':
aargs = args.split(None, 2)
new_name = " +" + aargs[1]
channel.label = new_name
w.buffer_set(channel.channel_buffer, "short_name", new_name)
@utf8_decode
def set_unread_cb(data, current_buffer, command):
for channel in EVENTROUTER.weechat_controller.buffers.values():
channel.mark_read()
return w.WEECHAT_RC_OK
@slack_buffer_or_ignore
@utf8_decode
def set_unread_current_buffer_cb(data, current_buffer, command):
channel = EVENTROUTER.weechat_controller.buffers.get(current_buffer)
channel.mark_read()
return w.WEECHAT_RC_OK
def command_p(data, current_buffer, args):
args = args.split(' ', 1)[1]
w.prnt("", "{}".format(eval(args)))
###### NEW EXCEPTIONS
class ProcessNotImplemented(Exception):
"""
Raised when we try to call process_(something), but
(something) has not been defined as a function.
"""
def __init__(self, function_name):
super(ProcessNotImplemented, self).__init__(function_name)
class InvalidType(Exception):
"""
Raised when we do type checking to ensure objects of the wrong
type are not used improperly.
"""
def __init__(self, type_str):
super(InvalidType, self).__init__(type_str)
###### New but probably old and need to migrate
def closed_slack_debug_buffer_cb(data, buffer):
global slack_debug
slack_debug = None
return w.WEECHAT_RC_OK
def create_slack_debug_buffer():
global slack_debug, debug_string
if slack_debug is not None:
w.buffer_set(slack_debug, "display", "1")
else:
debug_string = None
slack_debug = w.buffer_new("slack-debug", "", "", "closed_slack_debug_buffer_cb", "")
w.buffer_set(slack_debug, "notify", "0")
def load_emoji():
try:
global EMOJI
DIR = w.info_get("weechat_dir", "")
        # no idea why this doesn't work w/o checking the type?!
dbg(type(DIR), 0)
ef = open('{}/weemoji.json'.format(DIR), 'r')
EMOJI = json.loads(ef.read())
ef.close()
except:
dbg("Unexpected error: {}".format(sys.exc_info()), 5)
return w.WEECHAT_RC_OK
def setup_hooks():
cmds = {k[8:]: v for k, v in globals().items() if k.startswith("command_")}
w.bar_item_new('slack_typing_notice', 'typing_bar_item_cb', '')
w.hook_timer(1000, 0, 0, "typing_update_cb", "")
w.hook_timer(1000, 0, 0, "buffer_list_update_callback", "EVENTROUTER")
w.hook_timer(3000, 0, 0, "reconnect_callback", "EVENTROUTER")
w.hook_timer(1000 * 60 * 5, 0, 0, "slack_never_away_cb", "")
w.hook_signal('buffer_closing', "buffer_closing_callback", "EVENTROUTER")
w.hook_signal('buffer_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('window_switch', "buffer_switch_callback", "EVENTROUTER")
w.hook_signal('quit', "quit_notification_cb", "")
if config.send_typing_notice:
w.hook_signal('input_text_changed', "typing_notification_cb", "")
w.hook_command(
# Command name and description
'slack', 'Plugin to allow typing notification and sync of read markers for slack.com',
# Usage
'[command] [command options]',
# Description of arguments
'Commands:\n' +
'\n'.join(cmds.keys()) +
'\nUse /slack help [command] to find out more\n',
# Completions
'|'.join(cmds.keys()),
# Function name
'slack_command_cb', '')
# w.hook_command('me', '', 'stuff', 'stuff2', '', 'me_command_cb', '')
w.hook_command_run('/me', 'me_command_cb', '')
w.hook_command_run('/query', 'command_talk', '')
w.hook_command_run('/join', 'command_talk', '')
w.hook_command_run('/part', 'part_command_cb', '')
w.hook_command_run('/leave', 'part_command_cb', '')
w.hook_command_run('/topic', 'topic_command_cb', '')
w.hook_command_run('/thread', 'thread_command_callback', '')
w.hook_command_run('/reply', 'thread_command_callback', '')
w.hook_command_run('/rehistory', 'rehistory_command_callback', '')
w.hook_command_run('/hide', 'hide_command_callback', '')
w.hook_command_run('/msg', 'msg_command_cb', '')
w.hook_command_run('/label', 'label_command_cb', '')
w.hook_command_run("/input complete_next", "complete_next_cb", "")
w.hook_command_run("/input set_unread", "set_unread_cb", "")
w.hook_command_run("/input set_unread_current_buffer", "set_unread_current_buffer_cb", "")
w.hook_command_run('/away', 'away_command_cb', '')
w.hook_completion("nicks", "complete @-nicks for slack", "nick_completion_cb", "")
w.hook_completion("emoji", "complete :emoji: for slack", "emoji_completion_cb", "")
# Hooks to fix/implement
# w.hook_signal('buffer_opened', "buffer_opened_cb", "")
# w.hook_signal('window_scrolled', "scrolled_cb", "")
# w.hook_timer(3000, 0, 0, "slack_connection_persistence_cb", "")
##### END NEW
def dbg(message, level=0, main_buffer=False, fout=False):
"""
send debug output to the slack-debug buffer and optionally write to a file.
"""
# TODO: do this smarter
# return
if level >= config.debug_level:
global debug_string
message = "DEBUG: {}".format(message)
if fout:
file('/tmp/debug.log', 'a+').writelines(message + '\n')
if main_buffer:
# w.prnt("", "---------")
w.prnt("", "slack: " + message)
else:
if slack_debug and (not debug_string or debug_string in message):
# w.prnt(slack_debug, "---------")
w.prnt(slack_debug, message)
###### Config code
Setting = collections.namedtuple('Setting', ['default', 'desc'])
class PluginConfig(object):
# Default settings.
# These are, initially, each a (default, desc) tuple; the former is the
# default value of the setting, in the (string) format that weechat
# expects, and the latter is the user-friendly description of the setting.
# At __init__ time these values are extracted, the description is used to
# set or update the setting description for use with /help, and the default
# value is used to set the default for any settings not already defined.
# Following this procedure, the keys remain the same, but the values are
# the real (python) values of the settings.
default_settings = {
'background_load_all_history': Setting(
default='false',
desc='Load history for each channel in the background as soon as it'
' opens, rather than waiting for the user to look at it.'),
'channel_name_typing_indicator': Setting(
default='true',
desc='Change the prefix of a channel from # to > when someone is'
' typing in it. Note that this will (temporarily) affect the sort'
' order if you sort buffers by name rather than by number.'),
'colorize_private_chats': Setting(
default='false',
desc='Whether to use nick-colors in DM windows.'),
'debug_mode': Setting(
default='false',
desc='Open a dedicated buffer for debug messages and start logging'
' to it. How verbose the logging is depends on log_level.'),
'debug_level': Setting(
default='3',
desc='Show only this level of debug info (or higher) when'
' debug_mode is on. Lower levels -> more messages.'),
'distracting_channels': Setting(
default='',
desc='List of channels to hide.'),
'group_name_prefix': Setting(
default='&',
desc='The prefix of buffer names for groups (private channels).'),
'map_underline_to': Setting(
default='_',
desc='When sending underlined text to slack, use this formatting'
' character for it. The default ("_") sends it as italics. Use'
' "*" to send bold instead.'),
'never_away': Setting(
default='false',
desc='Poke Slack every five minutes so that it never marks you "away".'),
'record_events': Setting(
default='false',
desc='Log all traffic from Slack to disk as JSON.'),
'render_bold_as': Setting(
default='bold',
desc='When receiving bold text from Slack, render it as this in weechat.'),
'render_italic_as': Setting(
default='italic',
            desc='When receiving italic text from Slack, render it as this in weechat.'
' If your terminal lacks italic support, consider using "underline" instead.'),
'send_typing_notice': Setting(
default='true',
desc='Alert Slack users when you are typing a message in the input bar '
'(Requires reload)'),
'server_aliases': Setting(
default='',
desc='A comma separated list of `subdomain:alias` pairs. The alias'
' will be used instead of the actual name of the slack (in buffer'
' names, logging, etc). E.g `work:no_fun_allowed` would make your'
' work slack show up as `no_fun_allowed` rather than `work.slack.com`.'),
'short_buffer_names': Setting(
default='false',
desc='Use `foo.#channel` rather than `foo.slack.com.#channel` as the'
' internal name for Slack buffers. Overrides server_aliases.'),
'show_reaction_nicks': Setting(
default='false',
desc='Display the name of the reacting user(s) alongside each reactji.'),
'slack_api_token': Setting(
default='INSERT VALID KEY HERE!',
desc='List of Slack API tokens, one per Slack instance you want to'
' connect to. See the README for details on how to get these.'),
'slack_timeout': Setting(
default='20000',
desc='How long (ms) to wait when communicating with Slack.'),
'switch_buffer_on_join': Setting(
default='true',
desc='When /joining a channel, automatically switch to it as well.'),
'thread_suffix_color': Setting(
default='lightcyan',
desc='Color to use for the [thread: XXX] suffix on messages that'
' have threads attached to them.'),
'unfurl_ignore_alt_text': Setting(
default='false',
desc='When displaying ("unfurling") links to channels/users/etc,'
' ignore the "alt text" present in the message and instead use the'
' canonical name of the thing being linked to.'),
'unhide_buffers_with_activity': Setting(
default='false',
desc='When activity occurs on a buffer, unhide it even if it was'
' previously hidden (whether by the user or by the'
' distracting_channels setting).'),
}
# Set missing settings to their defaults. Load non-missing settings from
# weechat configs.
def __init__(self):
self.settings = {}
# Set all descriptions, replace the values in the dict with the
# default setting value rather than the (setting,desc) tuple.
# Use items() rather than iteritems() so we don't need to worry about
# invalidating the iterator.
for key, (default, desc) in self.default_settings.items():
w.config_set_desc_plugin(key, desc)
self.settings[key] = default
# Migrate settings from old versions of Weeslack...
self.migrate()
# ...and then set anything left over from the defaults.
for key, default in self.settings.iteritems():
if not w.config_get_plugin(key):
w.config_set_plugin(key, default)
self.config_changed(None, None, None)
def __str__(self):
return "".join([x + "\t" + str(self.settings[x]) + "\n" for x in self.settings.keys()])
def config_changed(self, data, key, value):
for key in self.settings:
self.settings[key] = self.fetch_setting(key)
if self.debug_mode:
create_slack_debug_buffer()
return w.WEECHAT_RC_OK
def fetch_setting(self, key):
if hasattr(self, 'get_' + key):
try:
return getattr(self, 'get_' + key)(key)
except:
return self.settings[key]
else:
# Most settings are on/off, so make get_boolean the default
return self.get_boolean(key)
def __getattr__(self, key):
return self.settings[key]
def get_boolean(self, key):
return w.config_string_to_boolean(w.config_get_plugin(key))
def get_string(self, key):
return w.config_get_plugin(key)
def get_int(self, key):
return int(w.config_get_plugin(key))
get_debug_level = get_int
get_group_name_prefix = get_string
get_map_underline_to = get_string
get_render_bold_as = get_string
get_render_italic_as = get_string
get_slack_timeout = get_int
get_thread_suffix_color = get_string
def get_distracting_channels(self, key):
return [x.strip() for x in w.config_get_plugin(key).split(',')]
def get_server_aliases(self, key):
alias_list = w.config_get_plugin(key)
if len(alias_list) > 0:
return dict(item.split(":") for item in alias_list.split(","))
def get_slack_api_token(self, key):
token = w.config_get_plugin("slack_api_token")
if token.startswith('${sec.data'):
return w.string_eval_expression(token, {}, {}, {})
else:
return token
def migrate(self):
"""
This is to migrate the extension name from slack_extension to slack
"""
if not w.config_get_plugin("migrated"):
for k in self.settings.keys():
if not w.config_is_set_plugin(k):
p = w.config_get("plugins.var.python.slack_extension.{}".format(k))
data = w.config_string(p)
if data != "":
w.config_set_plugin(k, data)
w.config_set_plugin("migrated", "true")
# to Trace execution, add `setup_trace()` to startup
# and to a function and sys.settrace(trace_calls) to a function
def setup_trace():
global f
now = time.time()
f = open('{}/{}-trace.json'.format(RECORD_DIR, now), 'w')
def trace_calls(frame, event, arg):
global f
if event != 'call':
return
co = frame.f_code
func_name = co.co_name
if func_name == 'write':
# Ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print >> f, 'Call to %s on line %s of %s from line %s of %s' % \
(func_name, func_line_no, func_filename,
caller_line_no, caller_filename)
f.flush()
return
# Main
if __name__ == "__main__":
w = WeechatWrapper(weechat)
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "script_unloaded", ""):
weechat_version = w.info_get("version_number", "") or 0
if int(weechat_version) < 0x1030000:
w.prnt("", "\nERROR: Weechat version 1.3+ is required to use {}.\n\n".format(SCRIPT_NAME))
else:
global EVENTROUTER
EVENTROUTER = EventRouter()
# setup_trace()
# WEECHAT_HOME = w.info_get("weechat_dir", "")
# STOP_TALKING_TO_SLACK = False
# Global var section
slack_debug = None
config = PluginConfig()
config_changed_cb = config.config_changed
typing_timer = time.time()
# domain = None
# previous_buffer = None
# slack_buffer = None
# never_away = False
hide_distractions = False
# hotlist = w.infolist_get("hotlist", "", "")
# main_weechat_buffer = w.info_get("irc_buffer", "{}.{}".format(domain, "DOESNOTEXIST!@#$"))
w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*", "config_changed_cb", "")
w.hook_modifier("input_text_for_buffer", "input_text_for_buffer_cb", "")
load_emoji()
setup_hooks()
# attach to the weechat hooks we need
tokens = config.slack_api_token.split(',')
for t in tokens:
s = SlackRequest(t, 'rtm.start', {})
EVENTROUTER.receive(s)
if config.record_events:
EVENTROUTER.record()
EVENTROUTER.handle_next()
w.hook_timer(10, 0, 0, "handle_next", "")
# END attach to the weechat hooks we need
| mit | -3,852,895,032,448,216,000 | 37.0493 | 239 | 0.593313 | false |
RPGOne/Skynet | pytorch-master/torch/nn/modules/linear.py | 1 | 1934 | import math
import torch
from torch.nn.parameter import Parameter
from .module import Module
class Linear(Module):
r"""Applies a linear transformation to the incoming data: :math:`y = Ax + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to False, the layer will not learn an additive bias. Default: True
Shape:
- Input: :math:`(N, in\_features)`
- Output: :math:`(N, out\_features)`
Attributes:
weight: the learnable weights of the module of shape (out_features x in_features)
bias: the learnable bias of the module of shape (out_features)
Examples::
>>> m = nn.Linear(20, 30)
>>> input = autograd.Variable(torch.randn(128, 20))
>>> output = m(input)
>>> print(output.size())
"""
def __init__(self, in_features, out_features, bias=True):
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.Tensor(out_features, in_features))
if bias:
self.bias = Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
if self.bias is None:
return self._backend.Linear()(input, self.weight)
else:
return self._backend.Linear()(input, self.weight, self.bias)
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
# TODO: Bilinear
# TODO: PartialLinear - maybe in sparse?
| bsd-3-clause | -3,095,174,257,862,721,500 | 29.698413 | 89 | 0.592037 | false |
Huyuwei/tvm | python/setup.py | 1 | 5411 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, exec-used
"""Setup TVM package."""
from __future__ import absolute_import
import os
import shutil
import sys
import sysconfig
import platform
from setuptools import find_packages
from setuptools.dist import Distribution
# need to use distutils.core for correct placement of cython dll
if "--inplace" in sys.argv:
from distutils.core import setup
from distutils.extension import Extension
else:
from setuptools import setup
from setuptools.extension import Extension
CURRENT_DIR = os.path.dirname(__file__)
def get_lib_path():
"""Get library path, name and version"""
    # We cannot import `libinfo.py` in setup.py directly since __init__.py
    # will be invoked, which introduces dependencies
libinfo_py = os.path.join(CURRENT_DIR, './tvm/_ffi/libinfo.py')
libinfo = {'__file__': libinfo_py}
exec(compile(open(libinfo_py, "rb").read(), libinfo_py, 'exec'), libinfo, libinfo)
version = libinfo['__version__']
if not os.getenv('CONDA_BUILD'):
lib_path = libinfo['find_lib_path']()
libs = [lib_path[0]]
if libs[0].find("runtime") == -1:
for name in lib_path[1:]:
if name.find("runtime") != -1:
libs.append(name)
break
else:
libs = None
return libs, version
LIB_LIST, __version__ = get_lib_path()
def config_cython():
"""Try to configure cython and return cython configuration"""
if os.name == 'nt':
print("WARNING: Cython is not supported on Windows, will compile without cython module")
return []
sys_cflags = sysconfig.get_config_var("CFLAGS")
if "i386" in sys_cflags and "x86_64" in sys_cflags:
print("WARNING: Cython library may not be compiled correctly with both i386 and x64")
return []
try:
from Cython.Build import cythonize
# from setuptools.extension import Extension
if sys.version_info >= (3, 0):
subdir = "_cy3"
else:
subdir = "_cy2"
ret = []
path = "tvm/_ffi/_cython"
if os.name == 'nt':
library_dirs = ['tvm', '../build/Release', '../build']
libraries = ['libtvm']
else:
library_dirs = None
libraries = None
for fn in os.listdir(path):
if not fn.endswith(".pyx"):
continue
ret.append(Extension(
"tvm._ffi.%s.%s" % (subdir, fn[:-4]),
["tvm/_ffi/_cython/%s" % fn],
include_dirs=["../include/",
"../3rdparty/dmlc-core/include",
"../3rdparty/dlpack/include",
],
library_dirs=library_dirs,
libraries=libraries,
language="c++"))
return cythonize(ret, compiler_directives={"language_level": 3})
except ImportError:
print("WARNING: Cython is not installed, will compile without cython module")
return []
class BinaryDistribution(Distribution):
def has_ext_modules(self):
return True
def is_pure(self):
return False
include_libs = False
wheel_include_libs = False
if not os.getenv('CONDA_BUILD'):
if "bdist_wheel" in sys.argv:
wheel_include_libs = True
else:
include_libs = True
setup_kwargs = {}
# For bdist_wheel only
if wheel_include_libs:
with open("MANIFEST.in", "w") as fo:
for path in LIB_LIST:
shutil.copy(path, os.path.join(CURRENT_DIR, 'tvm'))
_, libname = os.path.split(path)
fo.write("include tvm/%s\n" % libname)
setup_kwargs = {
"include_package_data": True
}
if include_libs:
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
for i, path in enumerate(LIB_LIST):
LIB_LIST[i] = os.path.relpath(path, curr_path)
setup_kwargs = {
"include_package_data": True,
"data_files": [('tvm', LIB_LIST)]
}
setup(name='tvm',
version=__version__,
description="TVM: An End to End Tensor IR/DSL Stack for Deep Learning Systems",
zip_safe=False,
install_requires=[
'numpy',
'decorator',
'attrs',
'psutil',
],
packages=find_packages(),
distclass=BinaryDistribution,
url='https://github.com/dmlc/tvm',
ext_modules=config_cython(),
**setup_kwargs)
if wheel_include_libs:
# Wheel cleanup
os.remove("MANIFEST.in")
for path in LIB_LIST:
_, libname = os.path.split(path)
os.remove("tvm/%s" % libname)
| apache-2.0 | -311,008,582,766,743,360 | 32.196319 | 96 | 0.608575 | false |
andrewklau/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.13/filter_plugins/oo_filters.py | 2 | 41534 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=no-name-in-module, import-error, wrong-import-order, ungrouped-imports
"""
Custom filters for use in openshift-ansible
"""
import os
import pdb
import pkg_resources
import re
import json
import yaml
import random
from ansible import errors
from collections import Mapping
from distutils.util import strtobool
from distutils.version import LooseVersion
from operator import itemgetter
from ansible.parsing.yaml.dumper import AnsibleDumper
from urlparse import urlparse
from six import string_types
HAS_OPENSSL = False
try:
import OpenSSL.crypto
HAS_OPENSSL = True
except ImportError:
pass
try:
# ansible-2.2
# ansible.utils.unicode.to_unicode is deprecated in ansible-2.2,
# ansible.module_utils._text.to_text should be used instead.
from ansible.module_utils._text import to_text
except ImportError:
# ansible-2.1
from ansible.utils.unicode import to_unicode as to_text
def oo_pdb(arg):
""" This pops you into a pdb instance where arg is the data passed in
from the filter.
Ex: "{{ hostvars | oo_pdb }}"
"""
pdb.set_trace()
return arg
def get_attr(data, attribute=None):
""" This looks up dictionary attributes of the form a.b.c and returns
the value.
If the key isn't present, None is returned.
Ex: data = {'a': {'b': {'c': 5}}}
attribute = "a.b.c"
returns 5
"""
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
ptr = data
for attr in attribute.split('.'):
if attr in ptr:
ptr = ptr[attr]
else:
ptr = None
break
return ptr
def oo_flatten(data):
""" This filter plugin will flatten a list of lists
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to flatten a List")
return [item for sublist in data for item in sublist]
def oo_merge_dicts(first_dict, second_dict):
""" Merge two dictionaries where second_dict values take precedence.
Ex: first_dict={'a': 1, 'b': 2}
second_dict={'b': 3, 'c': 4}
returns {'a': 1, 'b': 3, 'c': 4}
"""
if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
raise errors.AnsibleFilterError("|failed expects to merge two dicts")
merged = first_dict.copy()
merged.update(second_dict)
return merged
def oo_merge_hostvars(hostvars, variables, inventory_hostname):
""" Merge host and play variables.
When ansible version is greater than or equal to 2.0.0,
merge hostvars[inventory_hostname] with variables (ansible vars)
otherwise merge hostvars with hostvars['inventory_hostname'].
Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
'openshift_other_variable': '7'}
variables={'openshift_other_variable': '6'}
inventory_hostname='master1.example.com'
returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
variables={'openshift_other_variable': '6'}
inventory_hostname='master1.example.com'
returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
"""
if not isinstance(hostvars, Mapping):
raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
if not isinstance(variables, dict):
raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
if not isinstance(inventory_hostname, string_types):
raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
# pylint: disable=no-member
ansible_version = pkg_resources.get_distribution("ansible").version
merged_hostvars = {}
if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
merged_hostvars = oo_merge_dicts(
hostvars[inventory_hostname], variables)
else:
merged_hostvars = oo_merge_dicts(
hostvars[inventory_hostname], hostvars)
return merged_hostvars
def oo_collect(data, attribute=None, filters=None):
""" This takes a list of dict and collects all attributes specified into a
list. If filter is specified then we will include all items that
match _ALL_ of filters. If a dict entry is missing the key in a
filter it will be excluded from the match.
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
{'a':2, 'z': 'z'}, # True, return
{'a':3, 'z': 'z'}, # True, return
{'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
]
attribute = 'a'
filters = {'z': 'z'}
returns [1, 2, 3]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a List")
if not attribute:
raise errors.AnsibleFilterError("|failed expects attribute to be set")
if filters is not None:
if not isinstance(filters, dict):
raise errors.AnsibleFilterError("|failed expects filter to be a"
" dict")
retval = [get_attr(d, attribute) for d in data if (
all([d.get(key, None) == filters[key] for key in filters]))]
else:
retval = [get_attr(d, attribute) for d in data]
retval = [val for val in retval if val is not None]
return retval
def oo_select_keys_from_list(data, keys):
""" This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list")
if not isinstance(keys, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [oo_select_keys(item, keys) for item in data]
return oo_flatten(retval)
def oo_select_keys(data, keys):
""" This returns a list, which contains the value portions for the keys
Ex: data = { 'a':1, 'b':2, 'c':3 }
keys = ['a', 'c']
returns [1, 3]
"""
if not isinstance(data, Mapping):
raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
if not isinstance(keys, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
# Gather up the values for the list of keys passed in
retval = [data[key] for key in keys if key in data]
return retval
def oo_prepend_strings_in_list(data, prepend):
""" This takes a list of strings and prepends a string to each item in the
list
Ex: data = ['cart', 'tree']
prepend = 'apple-'
returns ['apple-cart', 'apple-tree']
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not all(isinstance(x, string_types) for x in data):
raise errors.AnsibleFilterError("|failed expects first param is a list"
" of strings")
retval = [prepend + s for s in data]
return retval
def oo_combine_key_value(data, joiner='='):
"""Take a list of dict in the form of { 'key': 'value'} and
arrange them as a list of strings ['key=value']
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
rval = []
for item in data:
rval.append("%s%s%s" % (item['key'], joiner, item['value']))
return rval
def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
"""Take a dict in the form of { 'key': 'value', 'key': 'value' } and
arrange them as a string 'key=value key=value'
"""
if not isinstance(data, dict):
# pylint: disable=line-too-long
raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
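# Illustrative example (hypothetical input; output key order follows dict iteration order):
#   oo_combine_dict({'region': 'infra', 'zone': 'default'})  could yield  'region=infra zone=default'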
def oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
"""Take a dict and arrange them as a list of dicts
Input data:
{'region': 'infra', 'test_k': 'test_v'}
Return data:
[{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]
Written for use of the oc_label module
"""
if not isinstance(data, dict):
# pylint: disable=line-too-long
raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))
rval = []
for label in data.items():
rval.append({key_title: label[0], value_title: label[1]})
return rval
def oo_ami_selector(data, image_name):
""" This takes a list of amis and an image name and attempts to return
the latest ami.
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects first param is a list")
if not data:
return None
else:
if image_name is None or not image_name.endswith('_*'):
ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
return ami['ami_id']
else:
ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
return ami['ami_id']
def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
""" This takes a dictionary of volume definitions and returns a valid ec2
volume definition based on the host_type and the values in the
dictionary.
The dictionary should look similar to this:
{ 'master':
{ 'root':
{ 'volume_size': 10, 'device_type': 'gp2',
'iops': 500
},
'docker':
{ 'volume_size': 40, 'device_type': 'gp2',
'iops': 500, 'ephemeral': 'true'
}
},
'node':
{ 'root':
{ 'volume_size': 10, 'device_type': 'io1',
'iops': 1000
},
'docker':
{ 'volume_size': 40, 'device_type': 'gp2',
'iops': 500, 'ephemeral': 'true'
}
}
}
"""
if not isinstance(data, dict):
# pylint: disable=line-too-long
raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
if host_type not in ['master', 'node', 'etcd']:
raise errors.AnsibleFilterError("|failed expects etcd, master or node"
" as the host type")
root_vol = data[host_type]['root']
root_vol['device_name'] = '/dev/sda1'
root_vol['delete_on_termination'] = True
if root_vol['device_type'] != 'io1':
root_vol.pop('iops', None)
if host_type in ['master', 'node'] and 'docker' in data[host_type]:
docker_vol = data[host_type]['docker']
docker_vol['device_name'] = '/dev/xvdb'
docker_vol['delete_on_termination'] = True
if docker_vol['device_type'] != 'io1':
docker_vol.pop('iops', None)
if docker_ephemeral:
docker_vol.pop('device_type', None)
docker_vol.pop('delete_on_termination', None)
docker_vol['ephemeral'] = 'ephemeral0'
return [root_vol, docker_vol]
elif host_type == 'etcd' and 'etcd' in data[host_type]:
etcd_vol = data[host_type]['etcd']
etcd_vol['device_name'] = '/dev/xvdb'
etcd_vol['delete_on_termination'] = True
if etcd_vol['device_type'] != 'io1':
etcd_vol.pop('iops', None)
return [root_vol, etcd_vol]
return [root_vol]
def oo_split(string, separator=','):
""" This splits the input string into a list. If the input string is
already a list we will return it as is.
"""
if isinstance(string, list):
return string
return string.split(separator)
def oo_haproxy_backend_masters(hosts, port):
""" This takes an array of dicts and returns an array of dicts
to be used as a backend for the haproxy role
"""
servers = []
for idx, host_info in enumerate(hosts):
server = dict(name="master%s" % idx)
server_ip = host_info['openshift']['common']['ip']
server['address'] = "%s:%s" % (server_ip, port)
server['opts'] = 'check'
servers.append(server)
return servers
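# Illustrative example (hypothetical host facts):
#   oo_haproxy_backend_masters([{'openshift': {'common': {'ip': '192.0.2.10'}}}], 8443)
#   ->  [{'name': 'master0', 'address': '192.0.2.10:8443', 'opts': 'check'}]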
def oo_filter_list(data, filter_attr=None):
""" This returns a list, which contains all items where filter_attr
evaluates to true
Ex: data = [ { a: 1, b: True },
{ a: 3, b: False },
{ a: 5, b: True } ]
filter_attr = 'b'
returns [ { a: 1, b: True },
{ a: 5, b: True } ]
"""
if not isinstance(data, list):
raise errors.AnsibleFilterError("|failed expects to filter on a list")
if not isinstance(filter_attr, string_types):
raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
# Gather up the values for the list of keys passed in
return [x for x in data if filter_attr in x and x[filter_attr]]
def oo_nodes_with_label(nodes, label, value=None):
""" Filters a list of nodes by label and value (if provided)
It handles labels that are in the following variables by priority:
openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
Examples:
data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
'c': {'openshift_node_labels': {'size': 'S'}}]
label = 'color'
returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
'c': {'openshift_node_labels': {'size': 'S'}}]
label = 'color'
value = 'green'
returns = ['b': {'labels': {'color': 'green', 'size': 'L'}}]
Args:
nodes (list[dict]): list of node to node variables
label (str): label to filter `nodes` by
        value (Optional[str]): value of `label` to filter by. Defaults
to None.
Returns:
list[dict]: nodes filtered by label and value (if provided)
"""
if not isinstance(nodes, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if not isinstance(label, string_types):
raise errors.AnsibleFilterError("failed expects label to be a string")
if value is not None and not isinstance(value, string_types):
raise errors.AnsibleFilterError("failed expects value to be a string")
def label_filter(node):
""" filter function for testing if node should be returned """
if not isinstance(node, dict):
raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
if 'openshift_node_labels' in node:
labels = node['openshift_node_labels']
elif 'cli_openshift_node_labels' in node:
labels = node['cli_openshift_node_labels']
elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
labels = node['openshift']['node']['labels']
else:
return False
if isinstance(labels, string_types):
labels = yaml.safe_load(labels)
if not isinstance(labels, dict):
raise errors.AnsibleFilterError(
"failed expected node labels to be a dict or serializable to a dict"
)
return label in labels and (value is None or labels[label] == value)
return [n for n in nodes if label_filter(n)]
def oo_parse_heat_stack_outputs(data):
""" Formats the HEAT stack output into a usable form
The goal is to transform something like this:
+---------------+-------------------------------------------------+
| Property | Value |
+---------------+-------------------------------------------------+
| capabilities | [] | |
| creation_time | 2015-06-26T12:26:26Z | |
| description | OpenShift cluster | |
| … | … |
| outputs | [ |
| | { |
| | "output_value": "value_A" |
| | "description": "This is the value of Key_A" |
| | "output_key": "Key_A" |
| | }, |
| | { |
| | "output_value": [ |
| | "value_B1", |
| | "value_B2" |
| | ], |
| | "description": "This is the value of Key_B" |
| | "output_key": "Key_B" |
| | }, |
| | ] |
| parameters | { |
| … | … |
+---------------+-------------------------------------------------+
into something like this:
{
"Key_A": "value_A",
"Key_B": [
"value_B1",
"value_B2"
]
}
"""
# Extract the “outputs” JSON snippet from the pretty-printed array
in_outputs = False
outputs = ''
line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
for line in data['stdout_lines']:
match = line_regex.match(line)
if match:
if match.group(1) == 'outputs':
in_outputs = True
elif match.group(1) != '':
in_outputs = False
if in_outputs:
outputs += match.group(2)
outputs = json.loads(outputs)
# Revamp the “outputs” to put it in the form of a “Key: value” map
revamped_outputs = {}
for output in outputs:
revamped_outputs[output['output_key']] = output['output_value']
return revamped_outputs
# pylint: disable=too-many-branches
def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
""" Parses names from list of certificate hashes.
Ex: certificates = [{ "certfile": "/root/custom1.crt",
"keyfile": "/root/custom1.key",
"cafile": "/root/custom-ca1.crt" },
{ "certfile": "custom2.crt",
"keyfile": "custom2.key",
"cafile": "custom-ca2.crt" }]
returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
"keyfile": "/etc/origin/master/named_certificates/custom1.key",
"cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
"names": [ "public-master-host.com",
"other-master-host.com" ] },
{ "certfile": "/etc/origin/master/named_certificates/custom2.crt",
"keyfile": "/etc/origin/master/named_certificates/custom2.key",
"cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
"names": [ "some-hostname.com" ] }]
"""
if not isinstance(named_certs_dir, string_types):
raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
if not isinstance(internal_hostnames, list):
raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
if not HAS_OPENSSL:
raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
for certificate in certificates:
if 'names' in certificate.keys():
continue
else:
certificate['names'] = []
if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
(certificate['certfile'], certificate['keyfile']))
try:
st_cert = open(certificate['certfile'], 'rt').read()
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
certificate['names'].append(str(cert.get_subject().commonName.decode()))
for i in range(cert.get_extension_count()):
if cert.get_extension(i).get_short_name() == 'subjectAltName':
for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
certificate['names'].append(name)
except Exception:
raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
"please specify certificate names in host inventory"))
certificate['names'] = list(set(certificate['names']))
if 'cafile' not in certificate:
certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
if not certificate['names']:
raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
"detected a collision with internal hostname, please specify " +
"certificate names in host inventory"))
for certificate in certificates:
# Update paths for configuration
certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
if 'cafile' in certificate:
certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
return certificates
def oo_pretty_print_cluster(data, prefix='tag_'):
""" Read a subset of hostvars and build a summary of the cluster
in the following layout:
"c_id": {
"master": {
"default": [
{ "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
]
"node": {
"infra": [
{ "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
],
"compute": [
{ "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
...
]
}
"""
def _get_tag_value(tags, key):
""" Extract values of a map implemented as a set.
Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
key = 'bar'
returns 'value2'
"""
for tag in tags:
if tag[:len(prefix) + len(key)] == prefix + key:
return tag[len(prefix) + len(key) + 1:]
raise KeyError(key)
def _add_host(clusters,
clusterid,
host_type,
sub_host_type,
host):
""" Add a new host in the clusters data structure """
if clusterid not in clusters:
clusters[clusterid] = {}
if host_type not in clusters[clusterid]:
clusters[clusterid][host_type] = {}
if sub_host_type not in clusters[clusterid][host_type]:
clusters[clusterid][host_type][sub_host_type] = []
clusters[clusterid][host_type][sub_host_type].append(host)
clusters = {}
for host in data:
try:
_add_host(clusters=clusters,
clusterid=_get_tag_value(host['group_names'], 'clusterid'),
host_type=_get_tag_value(host['group_names'], 'host-type'),
sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
host={'name': host['inventory_hostname'],
'public IP': host['oo_public_ipv4'],
'private IP': host['oo_private_ipv4']})
except KeyError:
pass
return clusters
def oo_generate_secret(num_bytes):
""" generate a session secret """
if not isinstance(num_bytes, int):
raise errors.AnsibleFilterError("|failed expects num_bytes is int")
secret = os.urandom(num_bytes)
return secret.encode('base-64').strip()
def to_padded_yaml(data, level=0, indent=2, **kw):
""" returns a yaml snippet padded to match the indent level you specify """
if data in [None, ""]:
return ""
try:
transformed = yaml.dump(data, indent=indent, allow_unicode=True,
default_flow_style=False,
Dumper=AnsibleDumper, **kw)
padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
return to_text("\n{0}".format(padded))
except Exception as my_e:
raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
def oo_openshift_env(hostvars):
''' Return facts which begin with "openshift_" and translate
legacy facts to their openshift_env counterparts.
Ex: hostvars = {'openshift_fact': 42,
'theyre_taking_the_hobbits_to': 'isengard'}
returns = {'openshift_fact': 42}
'''
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
facts = {}
regex = re.compile('^openshift_.*')
for key in hostvars:
if regex.match(key):
facts[key] = hostvars[key]
migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
'openshift_registry_selector': 'openshift_hosted_registry_selector'}
for old_fact, new_fact in migrations.items():
if old_fact in facts and new_fact not in facts:
facts[new_fact] = facts[old_fact]
return facts
# pylint: disable=too-many-branches, too-many-nested-blocks
def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables.
"""
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
if not issubclass(type(groups), dict):
raise errors.AnsibleFilterError("|failed expects groups is a dict")
if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
if persistent_volumes is None:
persistent_volumes = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
create_pv = params['create_pv']
if kind is not None and create_pv:
if kind == 'nfs':
host = params['host']
if host is None:
if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
host = groups['oo_nfs_to_config'][0]
else:
raise errors.AnsibleFilterError("|failed no storage host detected")
directory = params['nfs']['directory']
volume = params['volume']['name']
path = directory + '/' + volume
size = params['volume']['size']
access_modes = params['access']['modes']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
access_modes=access_modes,
storage=dict(
nfs=dict(
server=host,
path=path)))
persistent_volumes.append(persistent_volume)
elif kind == 'openstack':
volume = params['volume']['name']
size = params['volume']['size']
access_modes = params['access']['modes']
filesystem = params['openstack']['filesystem']
volume_id = params['openstack']['volumeID']
persistent_volume = dict(
name="{0}-volume".format(volume),
capacity=size,
access_modes=access_modes,
storage=dict(
cinder=dict(
fsType=filesystem,
volumeID=volume_id)))
persistent_volumes.append(persistent_volume)
elif not (kind == 'object' or kind == 'dynamic'):
msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
kind,
component)
raise errors.AnsibleFilterError(msg)
return persistent_volumes
def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
""" Generate list of persistent volume claims based on oo_openshift_env
storage options set in host variables.
"""
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
if persistent_volume_claims is None:
persistent_volume_claims = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
if 'storage' in hostvars['openshift']['hosted'][component]:
params = hostvars['openshift']['hosted'][component]['storage']
kind = params['kind']
create_pv = params['create_pv']
create_pvc = params['create_pvc']
if kind not in [None, 'object'] and create_pv and create_pvc:
volume = params['volume']['name']
size = params['volume']['size']
access_modes = params['access']['modes']
persistent_volume_claim = dict(
name="{0}-claim".format(volume),
capacity=size,
access_modes=access_modes)
persistent_volume_claims.append(persistent_volume_claim)
return persistent_volume_claims
def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
""" Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
names with proper version (if provided)
If 3.1 rpms are passed in they will only be augmented with the
correct version. This is important for hosts that are running both
Masters and Nodes.
"""
if not isinstance(rpms, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if openshift_version is not None and not isinstance(openshift_version, string_types):
raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
rpms_31 = []
for rpm in rpms:
if 'atomic' not in rpm:
rpm = rpm.replace("openshift", "atomic-openshift")
if openshift_version:
rpm = rpm + openshift_version
rpms_31.append(rpm)
return rpms_31
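# Illustrative example (hypothetical package list and version string):
#   oo_31_rpm_rename_conversion(['openshift', 'openshift-node'], '-3.1.1.6')
#   ->  ['atomic-openshift-3.1.1.6', 'atomic-openshift-node-3.1.1.6']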
def oo_pods_match_component(pods, deployment_type, component):
""" Filters a list of Pods and returns the ones matching the deployment_type and component
"""
if not isinstance(pods, list):
raise errors.AnsibleFilterError("failed expects to filter on a list")
if not isinstance(deployment_type, string_types):
raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
if not isinstance(component, string_types):
raise errors.AnsibleFilterError("failed expects component to be a string")
image_prefix = 'openshift/origin-'
if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
image_prefix = 'openshift3/ose-'
elif deployment_type == 'atomic-enterprise':
image_prefix = 'aep3_beta/aep-'
matching_pods = []
image_regex = image_prefix + component + r'.*'
for pod in pods:
for container in pod['spec']['containers']:
if re.search(image_regex, container['image']):
matching_pods.append(pod)
break # stop here, don't add a pod more than once
return matching_pods
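# Illustrative example (hypothetical pod spec):
#   pods = [{'spec': {'containers': [{'image': 'openshift/origin-haproxy-router:v1.3.0'}]}}]
#   oo_pods_match_component(pods, 'origin', 'haproxy-router')  ->  [that pod]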
def oo_get_hosts_from_hostvars(hostvars, hosts):
""" Return a list of hosts from hostvars """
retval = []
for host in hosts:
try:
retval.append(hostvars[host])
except errors.AnsibleError:
# host does not exist
pass
return retval
def oo_image_tag_to_rpm_version(version, include_dash=False):
""" Convert an image tag string to an RPM version if necessary
Empty strings and strings that are already in rpm version format
are ignored. Also remove non semantic version components.
Ex. v3.2.0.10 -> -3.2.0.10
v1.2.0-rc1 -> -1.2.0
"""
if not isinstance(version, string_types):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
if version.startswith("v"):
version = version[1:]
# Strip release from requested version, we no longer support this.
version = version.split('-')[0]
if include_dash and version and not version.startswith("-"):
version = "-" + version
return version
def oo_hostname_from_url(url):
""" Returns the hostname contained in a URL
Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
"""
if not isinstance(url, string_types):
raise errors.AnsibleFilterError("|failed expects a string or unicode")
parse_result = urlparse(url)
if parse_result.netloc != '':
return parse_result.netloc
else:
# netloc wasn't parsed, assume url was missing scheme and path
return parse_result.path
# pylint: disable=invalid-name, unused-argument
def oo_openshift_loadbalancer_frontends(
api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
"""TODO: Document me."""
loadbalancer_frontends = [{'name': 'atomic-openshift-api',
'mode': 'tcp',
'options': ['tcplog'],
'binds': ["*:{0}".format(api_port)],
'default_backend': 'atomic-openshift-api'}]
if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
loadbalancer_frontends.append({'name': 'nuage-monitor',
'mode': 'tcp',
'options': ['tcplog'],
'binds': ["*:{0}".format(nuage_rest_port)],
'default_backend': 'nuage-monitor'})
return loadbalancer_frontends
# pylint: disable=invalid-name
def oo_openshift_loadbalancer_backends(
api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
"""TODO: Document me."""
loadbalancer_backends = [{'name': 'atomic-openshift-api',
'mode': 'tcp',
'option': 'tcplog',
'balance': 'source',
'servers': oo_haproxy_backend_masters(servers_hostvars, api_port)}]
if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
# pylint: disable=line-too-long
loadbalancer_backends.append({'name': 'nuage-monitor',
'mode': 'tcp',
'option': 'tcplog',
'balance': 'source',
'servers': oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
return loadbalancer_backends
def oo_chomp_commit_offset(version):
"""Chomp any "+git.foo" commit offset string from the given `version`
and return the modified version string.
Ex:
- chomp_commit_offset(None) => None
- chomp_commit_offset(1337) => "1337"
- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
"""
if version is None:
return version
else:
# Stringify, just in case it's a Number type. Split by '+' and
# return the first split. No concerns about strings without a
# '+', .split() returns an array of the original string.
return str(version).split('+')[0]
def oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""Generates a random string of given length from a set of alphanumeric characters.
The default source uses [a-z][A-Z][0-9]
Ex:
- oo_random_word(3) => aB9
    - oo_random_word(4, source='012') => 0120

"""
return ''.join(random.choice(source) for i in range(length))
class FilterModule(object):
""" Custom ansible filter mapping """
# pylint: disable=no-self-use, too-few-public-methods
def filters(self):
""" returns a mapping of filters to methods """
return {
"oo_select_keys": oo_select_keys,
"oo_select_keys_from_list": oo_select_keys_from_list,
"oo_chomp_commit_offset": oo_chomp_commit_offset,
"oo_collect": oo_collect,
"oo_flatten": oo_flatten,
"oo_pdb": oo_pdb,
"oo_prepend_strings_in_list": oo_prepend_strings_in_list,
"oo_ami_selector": oo_ami_selector,
"oo_ec2_volume_definition": oo_ec2_volume_definition,
"oo_combine_key_value": oo_combine_key_value,
"oo_combine_dict": oo_combine_dict,
"oo_dict_to_list_of_dict": oo_dict_to_list_of_dict,
"oo_split": oo_split,
"oo_filter_list": oo_filter_list,
"oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
"oo_parse_named_certificates": oo_parse_named_certificates,
"oo_haproxy_backend_masters": oo_haproxy_backend_masters,
"oo_pretty_print_cluster": oo_pretty_print_cluster,
"oo_generate_secret": oo_generate_secret,
"oo_nodes_with_label": oo_nodes_with_label,
"oo_openshift_env": oo_openshift_env,
"oo_persistent_volumes": oo_persistent_volumes,
"oo_persistent_volume_claims": oo_persistent_volume_claims,
"oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
"oo_pods_match_component": oo_pods_match_component,
"oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
"oo_image_tag_to_rpm_version": oo_image_tag_to_rpm_version,
"oo_merge_dicts": oo_merge_dicts,
"oo_hostname_from_url": oo_hostname_from_url,
"oo_merge_hostvars": oo_merge_hostvars,
"oo_openshift_loadbalancer_frontends": oo_openshift_loadbalancer_frontends,
"oo_openshift_loadbalancer_backends": oo_openshift_loadbalancer_backends,
"to_padded_yaml": to_padded_yaml,
"oo_random_word": oo_random_word
}
| apache-2.0 | -3,096,275,517,794,424,000 | 40.555556 | 149 | 0.55179 | false |
PyPlanet/PyPlanet | pyplanet/core/storage/storage.py | 1 | 3977 | import asyncio_extras
import os
import importlib
from async_generator import yield_
from pyplanet.conf import settings
from pyplanet.core.storage import StorageDriver, StorageInterface
class Storage(StorageInterface):
"""
	The storage component manager manages storage access through drivers that can be customized.
.. warning::
Some drivers are work in progress!
"""
MAP_FOLDER = 'UserData/Maps'
MATCHSETTINGS_FOLDER = 'UserData/Maps/MatchSettings'
def __init__(self, instance, driver: StorageDriver, config):
"""
Initiate storage manager.
:param instance: Instance of the controller.
:param driver: Driver instance, must be init already!
:param config: Storage configuration (including driver + driver config).
:type instance: pyplanet.core.instance.Instance
:type driver: pyplanet.core.storage.interface.StorageDriver
:type config: dict
"""
self._instance = instance
self._driver = driver
self._config = config
self._game = None
# Create temp folders for driver.
self._tmp_root = os.path.join(settings.TMP_PATH, self._instance.process_name)
self._tmp_driver = os.path.join(self._tmp_root, )
@classmethod
def create_from_settings(cls, instance, storage_config):
driver_path, _, driver_cls_name = storage_config['DRIVER'].rpartition('.')
driver_options = storage_config['OPTIONS'] if 'OPTIONS' in storage_config else dict()
driver_cls = getattr(importlib.import_module(driver_path), driver_cls_name)
driver = driver_cls(instance, driver_options)
return cls(instance, driver, storage_config)
async def initialize(self):
self._game = self._instance.game
self._driver.map_dir = self._game.server_map_dir
self._driver.skin_dir = self._game.server_skin_dir
self._driver.data_dir = self._game.server_data_dir
self._driver.base_dir = self._game.server_data_dir[:len(self._game.server_data_dir)-9]
@property
def driver(self):
"""
Get the raw driver. Be careful with this!
:return: Driver Instance
:rtype: pyplanet.core.storage.interface.StorageDriver
"""
return self._driver
@asyncio_extras.async_contextmanager
async def open(self, file: str, mode: str = 'rb', **kwargs):
"""
Open a file on the server. Use relative path to the dedicated root. Use the other open methods to relative
from another base path.
:param file: Filename/path, relative to the dedicated root path.
:param mode: Mode to open, see the python `open` manual for supported modes.
:return: File handler.
"""
context = self._driver.open(file, mode, **kwargs)
await yield_(await context.__aenter__())
await context.__aexit__(None, None, None)
@asyncio_extras.async_contextmanager
async def open_match_settings(self, file: str, mode: str = 'r', **kwargs):
"""
Open a file on the server. Relative to the MatchSettings folder (UserData/Maps/MatchSettings).
:param file: Filename/path, relative to the dedicated matchsettings folder.
:param mode: Mode to open, see the python `open` manual for supported modes.
:return: File handler.
"""
context = self._driver.open('{}/{}'.format(self.MATCHSETTINGS_FOLDER, file), mode, **kwargs)
await yield_(await context.__aenter__())
await context.__aexit__(None, None, None)
@asyncio_extras.async_contextmanager
async def open_map(self, file: str, mode: str = 'rb', **kwargs):
"""
Open a file on the server. Relative to the Maps folder (UserData/Maps).
:param file: Filename/path, relative to the dedicated maps folder.
:param mode: Mode to open, see the python `open` manual for supported modes.
:return: File handler.
"""
context = self._driver.open('{}/{}'.format(self.MAP_FOLDER, file), mode, **kwargs)
await yield_(await context.__aenter__())
await context.__aexit__(None, None, None)
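	# Usage sketch (the map filename below is hypothetical and the handle API
	# depends on the configured storage driver):
	#
	#   async with instance.storage.open_map('MyTrack.Map.Gbx') as fh:
	#       contents = await fh.read()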
async def remove_map(self, file: str):
"""
Remove a map file with filename given.
:param file: Filename, relative to Maps folder.
"""
await self._driver.remove('{}/{}'.format(self.MAP_FOLDER, file))
| gpl-3.0 | 4,460,439,379,608,528,000 | 33.885965 | 108 | 0.713603 | false |
Inspq/ansible | lib/ansible/modules/packaging/os/pacman.py | 1 | 15030 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <[email protected]>
# (c) 2015, Indrajit Raychaudhuri <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pacman
short_description: Manage packages with I(pacman)
description:
- Manage packages with the I(pacman) package manager, which is used by
Arch Linux and its variants.
version_added: "1.0"
author:
- "Indrajit Raychaudhuri (@indrajitr)"
- "'Aaron Bull Schaefer (@elasticdog)' <[email protected]>"
- "Afterburn"
notes: []
requirements: []
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
recurse:
description:
- When removing a package, also remove its dependencies, provided
that they are not required by other packages and were not
explicitly installed by a user.
required: false
default: no
choices: ["yes", "no"]
version_added: "1.3"
force:
description:
- When removing package - force remove package, without any
checks. When update_cache - force redownload repo
databases.
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
update_cache:
description:
- Whether or not to refresh the master package lists. This can be
run as part of a package installation or as a separate step.
required: false
default: no
choices: ["yes", "no"]
aliases: [ 'update-cache' ]
upgrade:
description:
- Whether or not to upgrade whole system
required: false
default: no
choices: ["yes", "no"]
version_added: "2.0"
'''
RETURN = '''
packages:
description: a list of packages that have been changed
returned: when upgrade is set to yes
type: list of strings
sample: ['package', 'other-package']
'''
EXAMPLES = '''
# Install package foo
- pacman:
name: foo
state: present
# Upgrade package foo
- pacman:
name: foo
state: latest
update_cache: yes
# Remove packages foo and bar
- pacman:
name: foo,bar
state: absent
# Recursively remove package baz
- pacman:
name: baz
state: absent
recurse: yes
# Run the equivalent of "pacman -Sy" as a separate step
- pacman:
update_cache: yes
# Run the equivalent of "pacman -Su" as a separate step
- pacman:
upgrade: yes
# Run the equivalent of "pacman -Syu" as a separate step
- pacman:
update_cache: yes
upgrade: yes
# Run the equivalent of "pacman -Rdd", force remove package baz
- pacman:
name: baz
state: absent
force: yes
'''
import shlex
import os
import re
import sys
def get_version(pacman_output):
"""Take pacman -Qi or pacman -Si output and get the Version"""
lines = pacman_output.split('\n')
for line in lines:
if 'Version' in line:
return line.split(':')[1].strip()
return None
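# Illustrative sketch: a 'Version' line as it appears in `pacman -Qi`/`-Si`
# output and the value this helper extracts (package name and version are
# hypothetical):
#
#   get_version("Name       : foo\nVersion    : 1.2.3-1\n")
#   # => '1.2.3-1'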
def query_package(module, pacman_path, name, state="present"):
"""Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available"""
if state == "present":
lcmd = "%s -Qi %s" % (pacman_path, name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
if lrc != 0:
# package is not installed locally
return False, False, False
# get the version installed locally (if any)
lversion = get_version(lstdout)
rcmd = "%s -Si %s" % (pacman_path, name)
rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
# get the version in the repository
rversion = get_version(rstdout)
if rrc == 0:
# Return True to indicate that the package is installed locally, and the result of the version number comparison
# to determine if the package is up-to-date.
return True, (lversion == rversion), False
# package is installed but cannot fetch remote Version. Last True stands for the error
return True, True, True
def update_package_db(module, pacman_path):
if module.params["force"]:
args = "Syy"
else:
args = "Sy"
cmd = "%s -%s" % (pacman_path, args)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
module.fail_json(msg="could not update package db")
def upgrade(module, pacman_path):
cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
cmdneedrefresh = "%s -Qu" % (pacman_path)
rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
data = stdout.split('\n')
data.remove('')
packages = []
diff = {
'before': '',
'after': '',
}
if rc == 0:
regex = re.compile('(\w+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
b = []
a = []
for p in data:
m = regex.search(p)
packages.append(m.group(1))
if module._diff:
diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
if module.check_mode:
module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
else:
module.fail_json(msg="Could not upgrade")
else:
module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
def remove_packages(module, pacman_path, packages):
data = []
diff = {
'before': '',
'after': '',
}
if module.params["recurse"] or module.params["force"]:
if module.params["recurse"]:
args = "Rs"
if module.params["force"]:
args = "Rdd"
if module.params["recurse"] and module.params["force"]:
args = "Rdds"
else:
args = "R"
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
installed, updated, unknown = query_package(module, pacman_path, package)
if not installed:
continue
cmd = "%s -%s %s --noconfirm --noprogressbar" % (pacman_path, args, package)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
if module._diff:
d = stdout.split('\n')[2].split(' ')[2:]
for i, pkg in enumerate(d):
d[i] = re.sub('-[0-9].*$', '', d[i].split('/')[-1])
diff['before'] += "%s\n" % pkg
data.append('\n'.join(d))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg="removed %s package(s)" % remove_c, diff=diff)
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pacman_path, state, packages, package_files):
install_c = 0
package_err = []
message = ""
data = []
diff = {
'before': '',
'after': '',
}
to_install_repos = []
to_install_files = []
for i, package in enumerate(packages):
# if the package is installed and state == present or state == latest and is up-to-date then skip
installed, updated, latestError = query_package(module, pacman_path, package)
if latestError and state == 'latest':
package_err.append(package)
if installed and (state == 'present' or (state == 'latest' and updated)):
continue
if package_files[i]:
to_install_files.append(package_files[i])
else:
to_install_repos.append(package)
if to_install_repos:
cmd = "%s -S %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_repos))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_repos), stderr))
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
install_c += len(to_install_repos)
if to_install_files:
cmd = "%s -U %s --noconfirm --noprogressbar --needed" % (pacman_path, " ".join(to_install_files))
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s: %s" % (" ".join(to_install_files), stderr))
data = stdout.split('\n')[3].split(' ')[2:]
data = [ i for i in data if i != '' ]
for i, pkg in enumerate(data):
data[i] = re.sub('-[0-9].*$', '', data[i].split('/')[-1])
if module._diff:
diff['after'] += "%s\n" % pkg
install_c += len(to_install_files)
if state == 'latest' and len(package_err) > 0:
message = "But could not ensure 'latest' state for %s package(s) as remote version could not be fetched." % (package_err)
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s). %s" % (install_c, message), diff=diff)
module.exit_json(changed=False, msg="package(s) already installed. %s" % (message), diff=diff)
def check_packages(module, pacman_path, packages, state):
would_be_changed = []
diff = {
'before': '',
'after': '',
'before_header': '',
'after_header': ''
}
for package in packages:
installed, updated, unknown = query_package(module, pacman_path, package)
if ((state in ["present", "latest"] and not installed) or
(state == "absent" and installed) or
(state == "latest" and not updated)):
would_be_changed.append(package)
if would_be_changed:
if state == "absent":
state = "removed"
if module._diff and (state == 'removed'):
diff['before_header'] = 'removed'
diff['before'] = '\n'.join(would_be_changed) + '\n'
elif module._diff and ((state == 'present') or (state == 'latest')):
diff['after_header'] = 'installed'
diff['after'] = '\n'.join(would_be_changed) + '\n'
module.exit_json(changed=True, msg="%s package(s) would be %s" % (
len(would_be_changed), state), diff=diff)
else:
module.exit_json(changed=False, msg="package(s) already %s" % state, diff=diff)
def expand_package_groups(module, pacman_path, pkgs):
expanded = []
for pkg in pkgs:
cmd = "%s -Sgq %s" % (pacman_path, pkg)
rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
# A group was found matching the name, so expand it
for name in stdout.split('\n'):
name = name.strip()
if name:
expanded.append(name)
else:
expanded.append(pkg)
return expanded
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(aliases=['pkg', 'package'], type='list'),
state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
recurse = dict(default=False, type='bool'),
force = dict(default=False, type='bool'),
upgrade = dict(default=False, type='bool'),
update_cache = dict(default=False, aliases=['update-cache'], type='bool')
),
required_one_of = [['name', 'update_cache', 'upgrade']],
supports_check_mode = True)
pacman_path = module.get_bin_path('pacman', True)
p = module.params
# normalize the state parameter
if p['state'] in ['present', 'installed']:
p['state'] = 'present'
elif p['state'] in ['absent', 'removed']:
p['state'] = 'absent'
if p["update_cache"] and not module.check_mode:
update_package_db(module, pacman_path)
if not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Updated the package master lists')
if p['update_cache'] and module.check_mode and not (p['name'] or p['upgrade']):
module.exit_json(changed=True, msg='Would have updated the package cache')
if p['upgrade']:
upgrade(module, pacman_path)
if p['name']:
pkgs = expand_package_groups(module, pacman_path, p['name'])
pkg_files = []
for i, pkg in enumerate(pkgs):
if re.match(".*\.pkg\.tar(\.(gz|bz2|xz|lrz|lzo|Z))?$", pkg):
# The package given is a filename, extract the raw pkg name from
# it and store the filename
pkg_files.append(pkg)
pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
else:
pkg_files.append(None)
if module.check_mode:
check_packages(module, pacman_path, pkgs, p['state'])
if p['state'] in ['present', 'latest']:
install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
elif p['state'] == 'absent':
remove_packages(module, pacman_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == "__main__":
main()
| gpl-3.0 | 989,851,401,062,312,200 | 32.178808 | 270 | 0.574784 | false |
kg-bot/SupyBot | plugins/Mailbox/plugin.py | 1 | 6690 | ###
# Copyright (c) 2005, Jeremiah Fincher
# Copyright (c) 2006, Jon Phillips
# Copyright (c) 2006, Creative Commons
# All rights reserved.
###
import time
import rfc822
import poplib
import textwrap
from cStringIO import StringIO as sio
import supybot.utils as utils
import supybot.world as world
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
from supybot.utils.iter import all
class Mailbox(callbacks.Privmsg):
"""Add the help for "@help Mailbox" here
This should describe *how* to use this plugin."""
"""
Module for checking a POP3 mailbox at a specified interval and posting it
to a specified chat channel.
"""
threaded = True
lastCheck = 0
# This provides a callback to self
def callCommand(self, method, irc, msg, *args, **kwargs):
try:
super(Mailbox, self).callCommand(method, irc, msg, *args, **kwargs)
except utils.web.Error, e:
irc.error(str(e))
def _checkServer(self, irc):
user = self.registryValue('user')
server = self.registryValue('server')
password = self.registryValue('password')
if not server:
raise callbacks.Error, 'There is no configured POP3 server.'
if not user:
raise callbacks.Error, 'There is no configured POP3 user.'
if not password:
raise callbacks.Error, 'There is no configured POP3 password.'
return (server, user, password)
def _connect(self, server, user, password):
pop = poplib.POP3(server)
pop.user(user)
pop.pass_(password)
return pop
def _getPop(self, irc):
return self._connect(*self._checkServer(irc))
def _getMsgs(self, pop):
n = len(pop.list()[1])
for i in range(1, n+1):
(_, lines, _) = pop.retr(i)
yield (i, '\r\n'.join(lines))
def _quit(self, pop, delete=True):
if delete:
n = len(pop.list()[1])
for i in range(1, n+1):
pop.dele(i)
pop.quit()
def __call__(self, irc, msg):
now = time.time()
if now - self.lastCheck > self.registryValue('period'):
try:
try:
t = world.SupyThread(target=self._checkForAnnouncements,
args=(irc,))
t.setDaemon(True)
t.start()
finally:
# If there's an error, we don't want to be checking every
# message.
self.lastCheck = now
except callbacks.Error, e:
self.log.warning('Couldn\'t check mail: %s', e)
except Exception:
self.log.exception('Uncaught exception checking for new mail:')
def _checkForAnnouncements(self, irc):
start = time.time()
self.log.info('Checking mailbox for announcements.')
pop = self._getPop(irc)
i = None
for (i, msg) in self._getMsgs(pop):
message = rfc822.Message(sio(msg))
frm = message.get('From')
if not frm:
self.log.warning('Received message without From header.')
continue
else:
frm = frm.rstrip()
subject = message.get('Subject', '').rstrip()
content = message.fp.read()
self.log.info('Received message with subject %q from %q.',
subject, frm)
if subject == 'all':
channels = list(irc.state.channels)
else:
channels = subject.split()
if not channels or not all(irc.isChannel, channels):
channels = list(self.registryValue('defaultChannels'))
if subject:
content = '%s: %s' % (subject, content)
if not channels:
self.log.info('Received message with improper subject '
'line from %s.', frm)
continue
prefix = self.registryValue('prefix')
content = utils.str.normalizeWhitespace(content)
self.log.info('Making announcement to %L.', channels)
chunks = textwrap.wrap(content, 350)
for channel in channels:
if channel in irc.state.channels:
maximum = self.registryValue('limit', channel)
for chunk in chunks[:maximum]:
s = self._formatChunk(
self._formatPrefix(prefix + " ")+chunk)
irc.queueMsg(ircmsgs.privmsg(channel, s))
prefix = ''
self._quit(pop)
self.log.info('Finished checking mailbox, time elapsed: %s',
utils.timeElapsed(time.time() - start))
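    # Illustrative sketch of how the mail subject is interpreted (channel names
    # are hypothetical): a subject of "all" announces to every channel the bot
    # is in, a subject of "#dev #ops" announces only to those channels, and any
    # other subject falls back to the plugin's defaultChannels setting with the
    # subject prefixed to the message body.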
# provides formatting for the prefix option
def _formatPrefix(self, s):
fancyprefix = self.registryValue('fancyprefix')
if fancyprefix:
return ircutils.bold(s)
else:
return s
# provides formatting for the email message
def _formatChunk(self, s):
fancystyle = self.registryValue('fancystyle')
if fancystyle:
return ircutils.bold(ircutils.mircColor(s, 'red'))
else:
return s
def check(self, irc, msg, args):
"""takes no arguments
Checks whether email is available at the configured mailbox.
"""
(server, user, password) = self._checkServer(irc)
pop = self._connect(server, user, password)
n = len(pop.list()[1])
irc.reply(format('I have %n waiting for me.', (n, 'message')))
def retrieve(self, irc, msg, args):
"""takes no arguments
Retrieves the emails from the configured mailbox and prints them to
stdout.
"""
(server, user, password) = self._checkServer(irc)
pop = self._connect(server, user, password)
for (_, msg) in self._getMsgs(pop):
print msg
irc.replySuccess()
# this is what is called when one asks supybot about Mailbox
def mailbox(self, irc, msg, args, email):
"""[<email>]
This is where one will get information about a registered email
account <email>.
"""
# copied the next line from the Webopedia plugin
# self._wpBackend(irc, msg, term)
mailbox = wrap(mailbox, [additional('text')])
Class = Mailbox
# vim:set shiftwidth=4 softtabstop=8 expandtab textwidth=78:
| gpl-3.0 | 6,275,430,813,889,883,000 | 34.210526 | 79 | 0.557848 | false |
Tancata/phylo | test_for_lgt_more_groups.py | 1 | 6655 | from ete3 import Tree, TreeStyle
import sys, re
#read in the bootstrapped consensus tree from one of Cedric's families. Ask whether the candidate LGT has phylogenetic support at some bootstrap threshold by checking various tree-based criteria for LGTs
#Arguments: treefile target_sequence_tag
#euk_supergroups = ['Viridiplantae','Oxymonadida','Alveolata'] #add more...
euk_supergroups = []
inh = open("List_that_matters.txt")
for line in inh:
euk_supergroups.append(line.rstrip())
inh.close()
#check tree string for sanity first
inh = open(sys.argv[1])
treestring = inh.readline()
treestr = treestring.replace(';','')
treestr = treestr + ";"
inh.close()
if len(treestring.strip()) == 0:
print sys.argv[1] + "\tEmpty tree"
quit()
tree = Tree(treestr)
out_tree = sys.argv[1] + ".pdf"
#target_sequence_tag = sys.argv[2]
target_sequence_tag = 'xxx'
#setup group assignments
group_assignments = {}
inh = open("Annotation_file_for_trees.txt")
for line in inh:
fields = re.split("\s+", line.rstrip())
if len(fields) >= 2:
group_assignments[fields[0]] = fields[1] #key = sequence ID, value = group assignment (e.g. Viridiplantae)
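# Illustrative sketch of the expected Annotation_file_for_trees.txt layout
# (sequence ID and supergroup below are hypothetical): one whitespace-separated
# pair per line, e.g.
#   SeqID_12345   Viridiplantae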
#setup a list of the eukaryotic sequences in the tree
eukaryote_seqs = []
target_leaf = ''
for node in tree:
node.add_features(domain="Other")
for leaf in tree:
if re.search(target_sequence_tag, leaf.name):
leaf.add_features(domain="Eukaryote")
eukaryote_seqs.append(leaf.name)
target_leaf = leaf
elif leaf.name in group_assignments:
if group_assignments[leaf.name] in euk_supergroups:
eukaryote_seqs.append(leaf.name)
leaf.add_features(domain="Eukaryote")
else:
leaf.add_features(domain="Other")
else:
leaf.add_features(domain="Other")
#print eukaryote_seqs
#root the tree on a clade (the biggest?) of bacteria, to avoid ridiculous problems with arbitrary roots on trees
biggest_other_node = 0
for node in tree.get_monophyletic(values=['Other'], target_attr="domain"):
if len(node) > biggest_other_node:
biggest_other_node = len(node)
tree.set_outgroup(node)
#test the various phylogenetic criteria for LGT.
print "Tree\tResult\tEuksInTree\tSupportEukMonophyly\tEuksInTargetGroup\tDistanceToClosestEukClade\tSupergroupsInTargetGroup"
#euk sequence is a singleton nested within a clade of bacteria, and there is only one eukaryote sequence in the tree
if len(eukaryote_seqs) == 1: #this is, I guess, an LGT candidate
print sys.argv[1] + "\tSingleton\t1\tN/A\tN/A\tN/A\t1"
#euk sequence is a singleton nested within a clade of bacteria, and the eukaryotes are not monophyletic in the tree
#print len(eukaryote_seqs)
else:
try:
answer = tree.check_monophyly(values=eukaryote_seqs, target_attr="name")
if answer[0] == True:
ca = tree.get_common_ancestor(eukaryote_seqs)
target_group_sgs = {}
for leaf in ca:
if leaf.name in group_assignments:
leaf_supergroup = group_assignments[leaf.name]
if leaf_supergroup in euk_supergroups:
target_group_sgs[leaf_supergroup] = 1
else:
print "Warning: a sequence in this tree doesn't have a supergroup assignment: " + str(leaf.name)
num_sgs = len(target_group_sgs.keys())
print sys.argv[1] + "\tEuks monophyletic\t" + str(len(eukaryote_seqs)) + "\t" + str(ca.support) + "\tN/A\tN/A\t" + str(num_sgs)
elif answer[0] == False:
mono_groups = []
target_group = ''
for node in tree.get_monophyletic(values=['Eukaryote'], target_attr="domain"):
for leaf in node:
if leaf.name == target_leaf.name:
target_group = node
else:
mono_groups.append(node)
size_target_group = len(target_group)
#get distance
shortest_distance = 999999999999999.0
closest_other_group = ''
for subtree in mono_groups:
curr_distance = tree.get_distance(target_group, subtree, topology_only=True)
if curr_distance < shortest_distance:
shortest_distance = curr_distance
closest_other_group = subtree
#find out what supergroups of eukaryotes are represented in the target group
target_group_sgs = {}
tg_names = []
for leaf in target_group:
tg_names.append(leaf.name)
if leaf.name in group_assignments:
leaf_supergroup = group_assignments[leaf.name]
if leaf_supergroup in euk_supergroups:
target_group_sgs[leaf_supergroup] = 1
else:
print "Warning: a sequence in this tree doesn't have a supergroup assignment: " + str(leaf.name)
num_sgs = len(target_group_sgs.keys())
print tg_names
c_a = tree.get_common_ancestor(tg_names)
#attempt to calculate distance on a version of the tree in which branches below some support threshold have been deleted
# closest_leaves = []
# for leaf in closest_other_group:
# closest_leaves.append(leaf.name)
# target_leaves = []
# for leaf in target_group:
# target_leaves.append(leaf.name)
# collapsed_tree = tree
# for node in collapsed_tree:
# if node.support < 0.5:
# node.delete()
# target_ca = collapsed_tree.get_common_ancestor(target_leaves)
# closest_ca = collapsed_tree.get_common_ancestor(closest_leaves)
# collapsed_distance = collapsed_tree.get_distance(target_ca, closest_ca, topology_only=True)
print sys.argv[1] + "\tEuks not monophyletic\t" + str(len(eukaryote_seqs)) + "\t" + str(c_a.support) + "\t" + str(size_target_group) + "\t" + str(shortest_distance) + "\t" + str(num_sgs)
else:
            print sys.argv[1] + "\t" + str(answer[0])
#If euks are monophyletic, what is the max. number allowed for the gene to be considered a candidate LGT?
#euk sequence is part of a euk clade nested within bacteria, and the eukaryotes are not monophyletic in the tree [what about the case where the LGT is the only copy in euks?]
#tree.render(out_tree)
except:
raise
#uncomment the following to make a PDF of the tree
ts = TreeStyle()
ts.show_leaf_name = True
ts.show_branch_support = True
ts.show_branch_length = False
tree.render(out_tree, tree_style=ts)
| mit | -7,968,757,924,115,821,000 | 43.66443 | 203 | 0.6284 | false |
taimur97/Feeder | server/flaskapp/feeder/rest.py | 1 | 7265 | # -*- coding: utf-8 -*-
'''
The REST-API of Feeder
'''
from feeder import app
from .database import db
from .models import (Feed, FeedItem, UserFeed, UserDeletion,
get_user, get_feed, get_userfeed)
#from flask_oauthlib.client import OAuth
from flask.ext.restful import (Resource, Api, reqparse, fields,
marshal_with)
from .util import parse_timestamp, datetime_to_string
from .sync import cache_feed
from .gauth import authorized
from datetime import datetime, timedelta
# Configure some logging
import logging
file_handler = logging.FileHandler('rest.log')
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
# Want a boolean class
class FieldBool(fields.Raw):
def format(self, value):
if value:
return 'true'
else:
return 'false'
# Parse dates properly
class FieldDateTime(fields.Raw):
def format(self, value):
if value is None:
return None
return datetime_to_string(value)
# Set up the REST API
api = Api(app)
# Set up argument parsers
## Listing feeds
getparser = reqparse.RequestParser()
getparser.add_argument('min_timestamp', type=str, required=False,
help='Timestamp to filter on (only newer)')
getparser.add_argument('link', type=str, required=False, action='append',
help='Url(s) to limit query for')
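# Illustrative request sketch (host and feed URL are hypothetical; the
# timestamp format is whatever util.parse_timestamp accepts):
#   GET /feeds?min_timestamp=2014-05-01T12:00:00Z&link=http://example.com/rss.xml
# Omitting min_timestamp makes the server default to roughly the last 7 days
# and skip the list of deletions.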
## Adding feed
postparser = reqparse.RequestParser()
postparser.add_argument('link', type=str, required=True,
help='URL to the feed')
postparser.add_argument('title', type=str, required=False,
help='Title of feed')
postparser.add_argument('tag', type=str, required=False,
help='Tag to categorize feed under')
## Deleting a feed
deleteparser = reqparse.RequestParser()
deleteparser.add_argument('link', type=str, required=True,
help='URL of the feed to delete')
# Set up return value mashers
## Get
### Single feed item
feeditem_fields = {
'title': fields.String,
'description': fields.String,
'link': fields.String,
'title_stripped': fields.String,
'snippet': fields.String,
'published': FieldDateTime,
'author': fields.String,
'comments': fields.String,
'enclosure': fields.String,
'tags': fields.List(fields.String),
'image': fields.String,
'read': FieldBool(default=False),
'json': fields.String
}
### Single feed with a possible list of items
feed_fields = {
'link': fields.String,
'title': fields.String,
'description': fields.String,
'published': FieldDateTime,
'tag': fields.String,
'timestamp': FieldDateTime,
'items': fields.List(fields.Nested(feeditem_fields))
}
### Single delete
delete_fields = {
'link': fields.String,
'timestamp': FieldDateTime
}
### Response with list of feeds, and list of deletes
feeds_response = {
'feeds': fields.List(fields.Nested(feed_fields)),
'deletes': fields.List(fields.Nested(delete_fields))
}
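# Illustrative response sketch produced by the marshallers above (all values
# are hypothetical):
#   {"feeds": [{"link": "...", "title": "...", "published": "...",
#               "items": [{"title": "...", "read": "false", ...}]}],
#    "deletes": [{"link": "...", "timestamp": "..."}]}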
def log_errors(f):
'''Log errors in the wrapped function and re-raise them.'''
def wrapped_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
print(e)
app.logger.error(str(e))
raise e
return wrapped_f
class Feeds(Resource):
'''
This class is the entire REST-interface for dealing with feeds.
'''
@log_errors
@marshal_with(feeds_response)
@authorized
def get(self, userid):
'''Return all feeds'''
args = getparser.parse_args()
print("Getting user")
user = get_user(userid)
#Wrong
# Query for feeds using lazy relationship
q = user.feeds
dt = None
# Filters
if args['link'] is not None:
urls = [u for u in args['link']]
q = q.filter(Feed.link.in_(urls))
if args['min_timestamp'] is not None:
dt = parse_timestamp(args['min_timestamp'])
# Require a timestap. If one was not provided in decent form,
# default to x days ago
if dt is None:
dt = datetime.utcnow() - timedelta(days=7)
q = q.filter(Feed.timestamp > dt)
feeds = q.all()
for f in feeds:
# Make sure to only return items with correct timestamp
# Set the items on the outer object
if dt is None:
f.items = f.feed.items
else:
f.items = FeedItem.query.filter(FeedItem.timestamp > dt,
FeedItem.feed_id == f.feed.id).all()
# If we have a timestamp, also return deletes done
if args['min_timestamp'] is None:
deletes = []
else:
q = UserDeletion.query.filter(UserDeletion.timestamp > dt)
deletes = q.all()
return {"feeds": feeds, "deletes": deletes}
@log_errors
@marshal_with(feed_fields)
@authorized
def post(self, userid):
'''Add new/Edit feed'''
user = get_user(userid)
args = postparser.parse_args()
# Make sure feed exists
feed, new = get_feed(args.link, indicate_new=True)
if new:
cache_feed(feed)
# Set link between user and feed
userfeed = get_userfeed(user, feed, args.tag, args.title)
# Remove possible deletes
UserDeletion.query.\
filter_by(user_id=user.id).\
filter_by(link=feed.link).\
delete()
# If we should update tag or title
if userfeed.tag != args.tag or userfeed.title != args.title:
userfeed.tag = args.tag
userfeed.title = args.title
db.session.add(userfeed)
# Else, already saved
db.session.commit()
# TODO limit number of items instead of time
# TODO include read information
dt = datetime.utcnow() - timedelta(days=1)
userfeed.items = FeedItem.query.filter(FeedItem.timestamp > dt,
FeedItem.feed_id == feed.id)\
.all()
# Return feed
return userfeed
class FeedsDeleter(Resource):
@log_errors
@authorized
def post(self, userid):
'''Delete a feed'''
user = get_user(userid)
args = deleteparser.parse_args()
feed = Feed.query.filter_by(link=args.link).first()
if feed is None:
app.logger.error("No such feed: {}".format(args.link))
return None, 404
# Store delete for other devices
ud = UserDeletion(user, feed)
db.session.add(ud)
# Perform delete
UserFeed.query.\
filter_by(user_id=user.id).\
filter_by(feed_id=feed.id).\
delete()
db.session.commit()
return None, 204
class PingResponder(Resource):
'''
A method that allows the app to query if the server is alive.
'''
@log_errors
def get(self):
return {}, 200
# Connect with API URLs
api.add_resource(Feeds, '/feeds')
api.add_resource(FeedsDeleter, '/feeds/delete')
api.add_resource(PingResponder, '/ping')
| gpl-2.0 | 6,649,980,418,391,360,000 | 27.490196 | 84 | 0.591053 | false |
welchbj/tt | tt/tests/unit/expressions/test_bexpr_sat_all.py | 1 | 4209 | """Tests for expression sat_all functionality."""
from tt.errors import NoEvaluationVariationError
from tt.expressions import BooleanExpression as be
from ._helpers import ExpressionTestCase
class TestExpressionSatAll(ExpressionTestCase):
def test_only_constants_exprs_cause_exception(self):
"""Test that expressions of only constants cause exceptions."""
with self.assertRaises(NoEvaluationVariationError):
for solution in be('0').sat_all():
pass
with self.assertRaises(NoEvaluationVariationError):
for solution in be('1').sat_all():
pass
with self.assertRaises(NoEvaluationVariationError):
for solution in be('0 and (1 xor 0) and (1 -> 0 -> 1)').sat_all():
pass
def test_single_operand_expr(self):
"""Test a single-operand expression."""
b = be('A')
res = list(b.sat_all())
self.assertEqual(1, len(res))
self.assertEqual('A=1', str(res[0]))
def test_expr_with_some_constant_only_clauses(self):
"""Test an expression with some clauses of only constants."""
b = be('(A xor B) and (0 or 1) and ((1))')
res = list(str(sol) for sol in b.sat_all())
self.assertEqual(2, len(res))
self.assertIn('A=1, B=0', res)
self.assertIn('A=0, B=1', res)
def test_naturally_unsat_expr(self):
"""Test an expression that results in no solutions."""
b = be('(A and ~A) and (B or C)')
res = list(b.sat_all())
self.assertEqual(0, len(res))
def test_sat_expr_with_one_solution(self):
"""Test an expression that has only one solution."""
b = be('A and B and ~C and D')
res = list(str(sol) for sol in b.sat_all())
self.assertEqual(1, len(res))
self.assertIn('A=1, B=1, C=0, D=1', res)
def test_sat_expr_with_multiple_solutions(self):
"""Test an expression that has many solutions."""
b = be('A and (B xor C) and D')
res = list(str(sol) for sol in b.sat_all())
self.assertEqual(2, len(res))
self.assertIn('A=1, B=1, C=0, D=1', res)
self.assertIn('A=1, B=0, C=1, D=1', res)
def test_constraints_eliminate_all_solutions(self):
"""Test constraints that eliminate all possible solutions."""
b = be('(A xor B) and C')
with b.constrain(A=True, B=True):
res = list(b.sat_all())
self.assertEqual(0, len(res))
def test_constraints_eliminate_some_solutions(self):
"""Test constraints that only eliminate some solutions."""
b = be('A xor B xor C xor D')
with b.constrain(A=False, B=False):
res = list(str(sol) for sol in b.sat_all())
self.assertEqual(2, len(res))
self.assertIn('A=0, B=0, C=1, D=0', res)
self.assertIn('A=0, B=0, C=0, D=1', res)
def test_constraints_eliminate_no_solutions(self):
"""Test constraints that do not eliminate any possible solutions."""
b = be('(A xor 0) and (B or C or D)')
with b.constrain(A=1):
res = list(str(sol) for sol in b.sat_all())
self.assertEqual(7, len(res))
self.assertIn('A=1, B=0, C=0, D=1', res)
self.assertIn('A=1, B=0, C=1, D=0', res)
self.assertIn('A=1, B=0, C=1, D=1', res)
self.assertIn('A=1, B=1, C=0, D=0', res)
self.assertIn('A=1, B=1, C=0, D=1', res)
self.assertIn('A=1, B=1, C=1, D=0', res)
self.assertIn('A=1, B=1, C=1, D=1', res)
def test_all_symbols_constrained_yields_sat_solution(self):
"""Test constraining all symbols, resulting in a valid solution."""
b = be('(A <-> B) and (C or D)')
with b.constrain(A=1, B=1, C=1, D=0):
res = list(str(sol) for sol in b.sat_all())
self.assertEqual(1, len(res))
self.assertIn('A=1, B=1, C=1, D=0', res)
def test_all_symbols_constrained_yields_no_solutions(self):
"""Test constraining all symbols, resulting in no valid solutions."""
with be('A or B or C or D').constrain(A=0, B=0, C=0, D=0) as b:
res = list(str(sol) for sol in b.sat_all())
self.assertEqual(0, len(res))
| mit | 819,618,383,093,715,800 | 39.864078 | 78 | 0.577809 | false |
sparrow242/demandfs | demandfs/demandfs.py | 1 | 13810 | #!/usr/bin/env python
"""
demandfs.py - mount and umount sources on demand
Copyright (C) 2013 Sebastian Meyer <[email protected]>
Based upon the the xmp.py-FS Example in the fuse-python distribtion:
Copyright (C) 2001 Jeff Epler <[email protected]>
Copyright (C) 2006 Csaba Henk <[email protected]>
http://sourceforge.net/p/fuse/fuse-python/ci/master/tree/example/xmp.py
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see https://www.gnu.org/licenses/gpl-3.0.
"""
import errno
import fcntl
import subprocess
import sys
import threading
import time
import os
try:
import fuse
except ImportError as e:
print "Can't import the python fuse module."
print "If you use Linux, take a look into your repositories."
print "Mostly the package is known as python-fuse or fuse-python."
sys.exit(2)
fuse.fuse_python_api = (0, 2)
TIMER_CHECK_SECONDS = 30 # interval for the timer to check the fs for idle
STATE_LOCK = threading.Lock() # Lock to protect the mount-state of the fs
BACKDIR = None # Necessary global for the path to the backdir
VERBOSE = False
def verbose(message):
"""
Will print message only if VERBOSE is True
"""
if VERBOSE:
print message
class Timer(threading.Thread):
"""
Timer will check the idle-state of the Filesystem every
TIMER_CHECK_SECONDS seconds
"""
def __init__(self, dfs):
""" dfs: the instance of the DemandFileSystem """
threading.Thread.__init__(self)
self.dfs = dfs
self.run_thread = True
self.timer_event = threading.Event()
def run(self):
""" Thread loop to check the idle-state of the Filesystem """
while self.run_thread:
verbose("Timer checks for idle...")
STATE_LOCK.acquire()
            if (self.dfs.backdir_is_mounted
                    and self.dfs.last_activity + self.dfs.timeout < time.time()):
                self.dfs.umount_backdir()
STATE_LOCK.release()
self.timer_event.wait(TIMER_CHECK_SECONDS)
class DemandFS(fuse.Fuse):
"""
A Fuse-Layer between a mountpoint (where the FS is mounted) and another
directory (given as option backdir).
    Every request will reset the timer.
"""
def __init__(self, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self.backdir = None
self.timeout = 60
self.mountscript = None
self.umountscript = None
self.backdir_is_mounted = False
self.last_activity = time.time()
self.verbose = False
self.timer = None
def fsinit(self, *args):
self.timer = Timer(self)
self.timer.start()
def fsdestroy(self, *args):
verbose("fsdestroy called with args:" % args)
self.umount_backdir()
self.timer.run_thread = False
self.timer.timer_event.set()
def mount_backdir(self):
"""
Be sure you have acquired the STATE_LOCK before call this!
        Calls the script to mount the backdir. If the script returns a value
        != 0 we expect the backdir is not available.
"""
ret = self.run_script(self.mountscript)
if ret == 0:
self.backdir_is_mounted = True
def run_script(self, path):
""" Call this to run an external script """
try:
verbose("in try, want to run: %s " % path)
subprocess.check_output(path, stderr=subprocess.STDOUT)
#TODO: Log output here
return 0
except subprocess.CalledProcessError as e:
print "External script failed"
return e.returncode
def trigger_activity(self):
"""
        Called every time the filesystem is working. It mounts the
        backdir if it is not mounted and renews the last_activity timestamp.
"""
STATE_LOCK.acquire()
if not self.backdir_is_mounted:
self.mount_backdir()
if not self.backdir_is_mounted:
STATE_LOCK.release()
return False
self.last_activity = time.time()
STATE_LOCK.release()
return True
def umount_backdir(self):
"""
Be sure you have acquired the STATE_LOCK before call this!
        Calls the script to unmount the backdir. If the script returns a value
        > 0 we expect the backdir is still available; if it returns < 0 the
        backdir is gone (0 means it was unmounted as planned).
"""
if self.backdir_is_mounted:
ret = self.run_script(self.umountscript)
if ret == 0:
self.backdir_is_mounted = False
else:
# TODO: Log failure
print "Can't unmount the backdir"
# Methods for filesystem-operations:
def getattr(self, path):
verbose("gettattr path: %s" % path)
# don't call the mountscript if it is the root-dir.
# a "ls" in the parent dir would trigger the mount
if path == "/":
return os.lstat(self.backdir + path)
elif self.trigger_activity():
return os.lstat(self.backdir + path)
else:
return -errno.EIO
def readlink(self, path):
verbose("readlink path: %s" % path)
if self.trigger_activity():
return os.readlink(self.backdir + path)
else:
return -errno.EIO
def readdir(self, path, offset):
verbose("readdir path offst: %s %s" % (path, offset))
if not self.trigger_activity():
yield -errno.EIO
for e in os.listdir(self.backdir + path):
yield fuse.Direntry(e)
def unlink(self, path):
verbose("unlink path: %s" % path)
if self.trigger_activity():
os.unlink(self.backdir + path)
else:
return -errno.EIO
def rmdir(self, path):
verbose("rmdir: %s" % path)
if self.trigger_activity():
os.rmdir(self.backdir + path)
else:
return -errno.EIO
def symlink(self, path, path1):
verbose("symlink: %s %s" % (path, path1))
if self.trigger_activity():
os.symlink(path, self.backdir + path1)
else:
return -errno.EIO
def rename(self, path, path1):
verbose("rename path, path1: %s %s" % (path, path1))
if self.trigger_activity():
os.rename(self.backdir + path, self.backdir + path1)
else:
return -errno.EIO
def link(self, path, path1):
verbose("link path, path1): %s %s" % (path, path1))
if self.trigger_activity():
os.link(self.backdir + path, self.backdir + path1)
else:
return -errno.EIO
def chmod(self, path, mode):
verbose("chmod path, mode: %s %s" % (path, mode))
if self.trigger_activity():
os.chmod(self.backdir + path, mode)
else:
return -errno.EIO
def chown(self, path, user, group):
verbose("chown, path, user, group: %s %s %s" % (path, user, group))
if self.trigger_activity():
os.chown(self.backdir + path, user, group)
else:
return -errno.EIO
def truncate(self, path, len):
verbose("truncate: %s %s" % (path, len))
if self.trigger_activity():
f = open(self.backdir + path, "a")
f.truncate(len)
f.close()
else:
return -errno.EIO
def mknod(self, path, mode, dev):
verbose("mknot path, mode, dev: %s %s %s" % (path, mode, dev))
if self.trigger_activity():
os.mknod(self.backdir + path, mode, dev)
else:
return -errno.EIO
def mkdir(self, path, mode):
verbose("mkdir path, mode: %s %s" % (path, mode))
if self.trigger_activity():
os.mkdir(self.backdir + path, mode)
else:
return -errno.EIO
def utime(self, path, times):
verbose("utime path, times: %s %s" % (path, times))
if self.trigger_activity():
os.utime(self.backdir + path, times)
else:
return -errno.EIO
def access(self, path, mode):
verbose("access path, mode: %s %s" % (path, mode))
if self.trigger_activity():
if not os.access(self.backdir + path, mode):
                return -errno.EACCES
else:
return -errno.EIO
class DemandFile(object):
def __init__(self, path, flags, *mode):
self.keep_cache = False
self.direct_io = False
path = BACKDIR + path
verbose("init file with path: %s" % path)
self.file = os.fdopen(os.open(path, flags, *mode),
self.flag2mode(flags))
self.fd = self.file.fileno()
def flag2mode(self, flags):
md = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
m = md[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
if flags | os.O_APPEND:
m = m.replace('w', 'a', 1)
return m
def read(self, length, offset):
verbose("file read length, offset: %s %s" % (length, offset))
if self.trigger_activity():
self.file.seek(offset)
return self.file.read(length)
else:
return -errno.EIO
def write(self, buf, offset):
verbose("file write buf, offset: %s %s" % (buf, offset))
if self.trigger_activity():
self.file.seek(offset)
self.file.write(buf)
return len(buf)
else:
return -errno.EIO
def release(self, flags):
verbose("file release flags: %s" % flags)
if self.trigger_activity():
self.file.close()
else:
return -errno.EIO
def _fflush(self):
verbose("_fflush!")
if self.trigger_activity():
if 'w' in self.file.mode or 'a' in self.file.mode:
self.file.flush()
else:
return -errno.EIO
def fsync(self, isfsyncfile):
verbose("file fsync isfsyncfile %s:" % isfsyncfile)
if self.trigger_activity():
self._fflush()
if isfsyncfile and hasattr(os, 'fdatasync'):
os.fdatasync(self.fd)
else:
os.fsync(self.fd)
else:
return -errno.EIO
def flush(self):
verbose("file flush")
if self.trigger_activity():
self._fflush()
os.close(os.dup(self.fd))
else:
return -errno.EIO
def fgetattr(self):
verbose("file fgetattr")
if self.trigger_activity():
return os.fstat(self.fd)
else:
return -errno.EIO
def ftruncate(self, len):
verbose("file ftruncate len: %s" % len)
if self.trigger_activity():
self.file.truncate(len)
else:
return -errno.EIO
def lock(self, cmd, owner, **kw):
verbose("file lock cmd, owner: %s %s" % (cmd, owner))
if self.trigger_activity():
op = { fcntl.F_UNLCK : fcntl.LOCK_UN,
fcntl.F_RDLCK : fcntl.LOCK_SH,
fcntl.F_WRLCK : fcntl.LOCK_EX }[kw['l_type']]
if cmd == fcntl.F_GETLK:
                return -errno.EOPNOTSUPP
elif cmd == fcntl.F_SETLK:
if op != fcntl.LOCK_UN:
op |= fcntl.LOCK_NB
elif cmd == fcntl.F_SETLKW:
pass
else:
return -errno.EINVAL
fcntl.lockf(self.fd, op, kw['l_start'], kw['l_len'])
else:
return -errno.EIO
def main(self, *a, **kw):
self.file_class = self.DemandFile
self.file_class.trigger_activity = self.trigger_activity
return fuse.Fuse.main(self, *a, **kw)
if __name__ == "__main__":
dfs = DemandFS()
dfs.flags = 0
dfs.multithreaded = 1
dfs.parser.add_option(mountopt="backdir", metavar="PATH",
help="path to the backdir.")
dfs.parser.add_option(mountopt="timeout", metavar="SEC",
help="timeout in sec. before unmount the backdir")
dfs.parser.add_option(mountopt="mountscript", metavar="PATH",
help="path to the script which do the mount")
dfs.parser.add_option(mountopt="umountscript", metavar="PATH",
help="path to the script which do the unmount")
dfs.parser.add_option(mountopt="verbose", metavar="True/False",
default=False, help="Activate verbose mode")
dfs.parse(values=dfs, errex=1)
if isinstance(dfs.verbose, str) and dfs.verbose.lower() == "true":
dfs.verbose = True
VERBOSE = True
dfs.timeout = int(dfs.timeout)
BACKDIR = dfs.backdir
dfs.main() | gpl-3.0 | -5,716,152,501,423,935,000 | 32.933661 | 79 | 0.547791 | false |
googleapis/googleapis-gen | google/cloud/dialogflow/cx/v3beta1/dialogflow-cx-v3beta1-py/google/cloud/dialogflowcx_v3beta1/types/fulfillment.py | 1 | 7242 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import response_message
from google.protobuf import struct_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.dialogflow.cx.v3beta1',
manifest={
'Fulfillment',
},
)
class Fulfillment(proto.Message):
r"""A fulfillment can do one or more of the following actions at the
same time:
- Generate rich message responses.
- Set parameter values.
- Call the webhook.
Fulfillments can be called at various stages in the
[Page][google.cloud.dialogflow.cx.v3beta1.Page] or
[Form][google.cloud.dialogflow.cx.v3beta1.Form] lifecycle. For
example, when a
[DetectIntentRequest][google.cloud.dialogflow.cx.v3beta1.DetectIntentRequest]
drives a session to enter a new page, the page's entry fulfillment
can add a static response to the
[QueryResult][google.cloud.dialogflow.cx.v3beta1.QueryResult] in the
returning
[DetectIntentResponse][google.cloud.dialogflow.cx.v3beta1.DetectIntentResponse],
call the webhook (for example, to load user data from a database),
or both.
Attributes:
messages (Sequence[google.cloud.dialogflowcx_v3beta1.types.ResponseMessage]):
The list of rich message responses to present
to the user.
webhook (str):
The webhook to call. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/webhooks/<Webhook ID>``.
return_partial_responses (bool):
Whether Dialogflow should return currently
queued fulfillment response messages in
streaming APIs. If a webhook is specified, it
happens before Dialogflow invokes webhook.
Warning:
1) This flag only affects streaming API.
Responses are still queued and returned once in
non-streaming API.
2) The flag can be enabled in any fulfillment
but only the first 3 partial responses will be
returned. You may only want to apply it to
fulfillments that have slow webhooks.
tag (str):
The tag used by the webhook to identify which fulfillment is
being called. This field is required if ``webhook`` is
specified.
set_parameter_actions (Sequence[google.cloud.dialogflowcx_v3beta1.types.Fulfillment.SetParameterAction]):
Set parameter values before executing the
webhook.
conditional_cases (Sequence[google.cloud.dialogflowcx_v3beta1.types.Fulfillment.ConditionalCases]):
Conditional cases for this fulfillment.
"""
class SetParameterAction(proto.Message):
r"""Setting a parameter value.
Attributes:
parameter (str):
Display name of the parameter.
value (google.protobuf.struct_pb2.Value):
The new value of the parameter. A null value
clears the parameter.
"""
parameter = proto.Field(
proto.STRING,
number=1,
)
value = proto.Field(
proto.MESSAGE,
number=2,
message=struct_pb2.Value,
)
class ConditionalCases(proto.Message):
r"""A list of cascading if-else conditions. Cases are mutually
exclusive. The first one with a matching condition is selected,
all the rest ignored.
Attributes:
cases (Sequence[google.cloud.dialogflowcx_v3beta1.types.Fulfillment.ConditionalCases.Case]):
A list of cascading if-else conditions.
"""
class Case(proto.Message):
r"""Each case has a Boolean condition. When it is evaluated to be
True, the corresponding messages will be selected and evaluated
recursively.
Attributes:
condition (str):
The condition to activate and select this case. Empty means
the condition is always true. The condition is evaluated
against [form parameters][Form.parameters] or [session
parameters][SessionInfo.parameters].
See the `conditions
reference <https://cloud.google.com/dialogflow/cx/docs/reference/condition>`__.
case_content (Sequence[google.cloud.dialogflowcx_v3beta1.types.Fulfillment.ConditionalCases.Case.CaseContent]):
A list of case content.
"""
class CaseContent(proto.Message):
r"""The list of messages or conditional cases to activate for
this case.
Attributes:
message (google.cloud.dialogflowcx_v3beta1.types.ResponseMessage):
Returned message.
additional_cases (google.cloud.dialogflowcx_v3beta1.types.Fulfillment.ConditionalCases):
Additional cases to be evaluated.
"""
message = proto.Field(
proto.MESSAGE,
number=1,
oneof='cases_or_message',
message=response_message.ResponseMessage,
)
additional_cases = proto.Field(
proto.MESSAGE,
number=2,
oneof='cases_or_message',
message='Fulfillment.ConditionalCases',
)
condition = proto.Field(
proto.STRING,
number=1,
)
case_content = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='Fulfillment.ConditionalCases.Case.CaseContent',
)
cases = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Fulfillment.ConditionalCases.Case',
)
messages = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=response_message.ResponseMessage,
)
webhook = proto.Field(
proto.STRING,
number=2,
)
return_partial_responses = proto.Field(
proto.BOOL,
number=8,
)
tag = proto.Field(
proto.STRING,
number=3,
)
set_parameter_actions = proto.RepeatedField(
proto.MESSAGE,
number=4,
message=SetParameterAction,
)
conditional_cases = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=ConditionalCases,
)
__all__ = tuple(sorted(__protobuf__.manifest))
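# Illustrative sketch (not part of the generated code): constructing a
# Fulfillment that returns a text response and sets a session parameter.
# The parameter name, value and response text below are made-up examples.
#
#   fulfillment = Fulfillment(
#       messages=[response_message.ResponseMessage(
#           text=response_message.ResponseMessage.Text(text=['Thanks, got it.']))],
#       set_parameter_actions=[Fulfillment.SetParameterAction(
#           parameter='order_number',
#           value=struct_pb2.Value(string_value='12345'))],
#       tag='confirm-order',
#   )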
| apache-2.0 | -547,233,568,092,019,840 | 35.39196 | 127 | 0.607705 | false |
darafferty/factor | factor/lib/operation.py | 1 | 16793 | """
General operation library
Contains the master class for all operations
"""
import os
import logging
import socket
import subprocess
import numpy as np
import sys
import uuid
from factor import _logging
from jinja2 import Environment, FileSystemLoader
from lofarpipe.support.utilities import create_directory
DIR = os.path.dirname(os.path.abspath(__file__))
env_parset = Environment(loader=FileSystemLoader(os.path.join(DIR, '..', 'pipeline',
'parsets')))
env_config = Environment(loader=FileSystemLoader(os.path.join(DIR, '..', 'pipeline')))
class Operation(object):
"""
Generic operation class
An operation is simply a generic pipeline that performs a part of the facet
calibration. The corresponding operation object holds the pipeline settings,
populates the pipeline config and parset templates, and updates the direction
object with variables needed by later operations.
Parameters
----------
parset : dict
Parset of operation
bands : list of Band objects
Bands for this operation
direction : Direction object
Direction for this operation
name : str, optional
Name of the operation
"""
def __init__(self, parset, bands, direction, name=None):
self.parset = parset.copy()
self.bands = bands
self.name = name.lower()
self.parset['op_name'] = name
self.direction = direction
_logging.set_level(self.parset['logging_level'])
self.log = logging.getLogger('factor:{0}'.format(self.name))
self.hostname = socket.gethostname()
self.node_list = parset['cluster_specific']['node_list']
# Working directory
self.factor_working_dir = parset['dir_working']
# Pipeline runtime and working dirs (pipeline makes subdir here with
# name of direction)
self.pipeline_runtime_dir = os.path.join(self.factor_working_dir, 'results',
self.name)
self.pipeline_working_dir = self.pipeline_runtime_dir
create_directory(self.pipeline_runtime_dir)
# Directory that holds the mapfiles
self.pipeline_mapfile_dir = os.path.join(self.pipeline_runtime_dir,
self.direction.name, 'mapfiles')
create_directory(self.pipeline_mapfile_dir)
# Directory in the runtime dir that holds parset and config files (also
# the results of the pipeline)
self.pipeline_parset_dir = os.path.join(self.pipeline_runtime_dir,
self.direction.name)
create_directory(self.pipeline_parset_dir)
# Directory that holds the mapfiles
self.pipeline_mapfile_dir = os.path.join(self.pipeline_runtime_dir,
self.direction.name, 'mapfiles')
create_directory(self.pipeline_mapfile_dir)
# Local scratch directories and corresponding node recipes
scratch_subdir = '{0}_{1}'.format(self.direction.name,
str(uuid.uuid4().get_hex()[0:6]))
if self.parset['cluster_specific']['dir_local'] is None:
# Not specified
self.local_scratch_dir = None
self.local_dir_parent = None
self.dppp_nodescript = 'executable_args'
elif self.parset['cluster_specific']['clusterdesc_file'].lower() == 'pbs':
# PBS: use special DPPP node script
self.local_scratch_dir = os.path.join(
self.parset['cluster_specific']['dir_local'], scratch_subdir)
self.local_dir_parent = self.parset['cluster_specific']['dir_local']
self.dppp_nodescript = 'dppp_scratch'
elif self.parset['cluster_specific']['clusterdesc_file'].lower() == 'slurm':
# SLURM: use special DPPP node script
self.local_scratch_dir = os.path.join(
self.parset['cluster_specific']['dir_local'], scratch_subdir)
self.local_dir_parent = self.parset['cluster_specific']['dir_local']
self.dppp_nodescript = 'dppp_scratch'
else:
# other: use given scratch directory and standard node script
self.local_scratch_dir = os.path.join(
self.parset['cluster_specific']['dir_local'], scratch_subdir)
self.local_dir_parent = self.parset['cluster_specific']['dir_local']
self.dppp_nodescript = 'executable_args'
if self.parset['cluster_specific']['dir_local_selfcal'] is None:
self.local_selfcal_scratch_dir = None
else:
self.local_selfcal_scratch_dir = os.path.join(
self.parset['cluster_specific']['dir_local_selfcal'], scratch_subdir)
# Directory that holds logs in a convenient place
self.log_dir = os.path.join(self.factor_working_dir, 'logs', self.name)
create_directory(self.log_dir)
# Log name used for logs in log_dir
self.logbasename = os.path.join(self.log_dir, self.direction.name)
# Below are paths for scripts, etc. in the Factor install directory
self.factor_root_dir = os.path.split(DIR)[0]
self.factor_pipeline_dir = os.path.join(self.factor_root_dir, 'pipeline')
self.factor_script_dir = os.path.join(self.factor_root_dir, 'scripts')
self.factor_parset_dir = os.path.join(self.factor_root_dir, 'parsets')
self.factor_skymodel_dir = os.path.join(self.factor_root_dir, 'skymodels')
# Below are the templates and output paths for the pipeline parset and
# config files. These may need to be re-defined in the subclasses
# if the operation has non-standard template names
self.pipeline_parset_template = '{0}_pipeline.parset'.format(self.name)
self.pipeline_parset_file = os.path.join(self.pipeline_parset_dir,
'pipeline.parset')
self.pipeline_config_template = 'pipeline.cfg'
self.pipeline_config_file = os.path.join(self.pipeline_parset_dir,
'pipeline.cfg')
# Define parameters needed for the pipeline config.
self.cfg_dict = {'lofarroot': parset['cluster_specific']['lofarroot'],
'pythonpath': parset['cluster_specific']['lofarpythonpath'],
'factorroot': self.factor_root_dir,
'pipeline_working_dir': self.pipeline_working_dir,
'pipeline_runtime_dir': self.pipeline_runtime_dir,
'wsclean_executable': parset['wsclean_executable'],
'image2fits_executable': parset['image2fits_executable'],
'dppp_nodescript': self.dppp_nodescript}
# Define global parameters needed by all pipeline parsets. Other,
# pipeline-specific, parameters should be defined in the subclasses by
# updating this dictionary
self.parms_dict = {'parset_dir': self.factor_parset_dir,
'skymodel_dir': self.factor_skymodel_dir,
'mapfile_dir': self.pipeline_mapfile_dir,
'pipeline_dir': self.factor_pipeline_dir,
'script_dir': self.factor_script_dir,
'local_dir': self.local_scratch_dir,
'local_dir_parent': self.local_dir_parent,
'selfcal_local_dir': self.local_selfcal_scratch_dir,
'pipeline_parset_dir': self.pipeline_parset_dir,
'hosts': self.node_list}
# Add cluster-related info
if self.parset['cluster_specific']['clustertype'] == 'local':
self.cfg_dict['remote'] = '[remote]\n'\
+ 'method = local\n'\
+ 'max_per_node = {0}\n'.format(self.parset['cluster_specific']['ncpu'])
elif self.parset['cluster_specific']['clustertype'] == 'juropa_slurm':
self.cfg_dict['remote'] = '[remote]\n'\
+ 'method = slurm_srun\n'\
+ 'max_per_node = {0}\n'.format(self.parset['cluster_specific']['ncpu'])
elif self.parset['cluster_specific']['clustertype'] == 'mpirun':
self.cfg_dict['remote'] = '[remote]\n'\
+ 'method = mpirun\n'\
+ 'max_per_node = {0}\n'.format(self.parset['cluster_specific']['ncpu'])
elif (self.parset['cluster_specific']['clustertype'] == 'pbs' or
self.parset['cluster_specific']['clustertype'] == 'slurm'):
self.cfg_dict['remote'] = ''
else:
self.log.error('Could not determine the nature of your cluster!')
sys.exit(1)
# an absolute path in ...['clusterdesc'] will overrule the "working_dir"
self.cfg_dict['clusterdesc'] = os.path.join(self.factor_working_dir,
self.parset['cluster_specific']['clusterdesc'])
def update_dicts(self):
"""
Update the dicts used for the pipeline parset templates
"""
self.cfg_dict.update(self.direction.__dict__)
self.parms_dict.update(self.direction.__dict__)
def setup(self):
"""
Set up this operation
This involves just filling the pipeline config and parset templates.
Generally, this does not need to be re-defined in the subclasses
unless the operation has non-standard template names
"""
# Update the dictionaries with the attributes of the operation's
# direction object. Any attributes set in the direction object that are
# also in the parms_dict will be set to those of the direction object
# (e.g., 'max_proc_per_node', which is set in the direction object by
# factor.cluster.divide_nodes() will override the value set above)
self.update_dicts()
self.pipeline_parset_template = env_parset.get_template(self.pipeline_parset_template)
tmp = self.pipeline_parset_template.render(self.parms_dict)
with open(self.pipeline_parset_file, 'w') as f:
f.write(tmp)
self.pipeline_config_template = env_config.get_template(self.pipeline_config_template)
tmp = self.pipeline_config_template.render(self.cfg_dict)
with open(self.pipeline_config_file, 'w') as f:
f.write(tmp)
def finalize(self):
"""
Finalize this operation
This should be defined in the subclasses if needed
"""
pass
def check_started(self):
"""
Checks whether operation has been started (but not necessarily
completed) before for this direction
Returns
-------
all_done : bool
True if operation was started on this direction
"""
has_state = self.direction.load_state()
if has_state:
if self.name in self.direction.started_operations:
return True
else:
return False
else:
return False
def check_completed(self):
"""
Checks whether operation has been run successfully before for this
direction
Returns
-------
all_done : bool
True if operation was successfully run on this direction
"""
has_state = self.direction.load_state()
if has_state:
if self.name in self.direction.completed_operations:
return True
else:
return False
else:
return False
def set_started(self):
"""
Sets the started state for the operation
"""
if self.name not in self.direction.started_operations:
self.direction.started_operations.append(self.name)
self.direction.save_state()
def set_completed(self):
"""
Sets the completed state for the operation
"""
if self.name not in self.direction.completed_operations:
self.direction.completed_operations.append(self.name)
self.direction.save_state()
def check_existing_files(self, mapfile):
"""
Checks if files in input mapfile exist
Parameters
----------
mapfile : str
Filename of mapfile to check
Returns
-------
all_exist : bool
True if all files in mapfile exist, False if not
"""
from lofarpipe.support.data_map import DataMap
all_exist = True
self.log.debug('Checking for existing files...')
try:
datamap = DataMap.load(mapfile)
for item in datamap:
# Handle case in which item.file is a Python list
if item.file[0] == '[' and item.file[-1] == ']':
files = item.file.strip('[]').split(',')
else:
files = [item.file]
for f in files:
if not os.path.exists(f):
all_exist = False
if all_exist:
self.log.debug('...all files exist')
else:
self.log.debug('...one or more files not found')
return all_exist
except IOError:
self.log.debug('Could not read mapfile {}. Skipping it'.format(mapfile))
return False
def can_restart(self):
"""
Checks the pipeline log for certain conditions that affect auto restarting
Returns
-------
can_restart : bool
True if pipeline log indicates an error for which auto restart is
possible
"""
logfile = self.logbasename + '.out.log'
can_restart = False
if os.path.exists(logfile):
# Read the last 20 lines and look for 'returncode 123456'
try:
with open(logfile, "rb") as f:
first = f.readline() # Read the first line.
f.seek(-10000, 2) # Jump back from end
while f.read(1) != b"\n": # Until EOL is found...
f.seek(-2, 1) # ...jump back the read byte plus one more.
last_lines = f.readlines() # Read last line.
for line in last_lines:
if 'returncode 123456' in line:
can_restart = True
break
except IOError:
can_restart = False
return can_restart
def get_steptypes(self):
"""
Returns the step types of completed pipeline steps
Returns
-------
steptypes : list
List of step types
"""
import pickle
statefile = os.path.join(self.pipeline_parset_dir, 'statefile')
if os.path.exists(statefile):
current_state = pickle.load(open(statefile, 'rb'))
steptypes = [item[0] for item in current_state[1]]
else:
steptypes = []
return steptypes
def reset_state_to_steptype(self, steptype):
"""
Resets the pipeline state to before the given steptype
Steptype is the type of the step as defined in the parset under
step.control.type
Parameters
----------
steptype : str
Step type from which to alter state
"""
import pickle
statefile = os.path.join(self.pipeline_parset_dir, 'statefile')
current_state = pickle.load(open(statefile, 'rb'))
steptypes = [item[0] for item in current_state[1]]
# delete steps from the first instance of the given step type
del_number = steptypes.index(steptype)
current_state[1] = current_state[1][:del_number]
pickle.dump(current_state, open(statefile, 'wb'))
def cleanup(self):
"""
Cleans up temp files in the scratch directories of each node
"""
if self.local_scratch_dir is not None:
for node in self.node_list:
if node == 'localhost':
cmd = ['rm', '-rf', self.local_scratch_dir]
else:
cmd = ['ssh', node, 'rm', '-rf', self.local_scratch_dir]
tmp = subprocess.call(cmd)
if self.local_selfcal_scratch_dir is not None:
for node in self.node_list:
if node == 'localhost':
cmd = ['rm', '-rf', self.local_selfcal_scratch_dir]
else:
cmd = ['ssh', node, 'rm', '-rf', self.local_selfcal_scratch_dir]
tmp = subprocess.call(cmd)
# Check whether we need to reset the pipeline state to before the sync step
steptypes = self.get_steptypes()
if 'sync_files' in steptypes and 'remove_synced_data' not in steptypes:
self.reset_state_to_steptype('sync_files')
| gpl-2.0 | -2,884,447,275,419,179,000 | 37.872685 | 94 | 0.581909 | false |
sampathweb/game_app | card_games/play_blackjack.py | 1 | 1843 | #!/usr/bin/env python
from __future__ import print_function
from blackjack import BlackJack
def play_blackjack(player):
game = BlackJack()
while True:
print('Your Hand %s is of value %d' % (game.player_hand, game.player_hand_value()))
action = raw_input('Enter: hit (1), stand (2) or split (3) or help (h): ').upper()
if action == '2': # Stand
result = game.game_result()
print('Dealer Hand %s is of value %d' % (game.dealer_hand, game.dealer_hand_value()))
print('Result is: ', result)
print('Round Over.')
return result
elif action == '1': # Hit
game.draw_card_player()
elif action == 'H': # Help
print('Your Hand Score is: ', game.player_hand_value())
print('You can Hit (1): Draw one more card to see if you get closer to 21, but not higher.')
print('You can Stand (2): Compare your current hand value with Dealer hand value to see if you scored higher, but still 21 or below.')
print('You can Split (3): ')
print('You can double down (4): ')
if __name__ == '__main__':
player = {}
player['chips'] = 100
player['round'] = 0
player['won'] = 0
player['lost'] = 0
player['push'] = 0
player['bust'] = 0
play = 'Y'
print('Welcome to BlackJack')
print('-' * 20)
    print('You have 100 Chips to play this game. On each round, you will have to pitch at least one chip. You can wager more.')
while play != 'N':
        play = raw_input('Play a round of BlackJack (Y/N)? ').upper()
        if play == 'Y':
            # ask for a wager only when the player actually wants to play a round
            chips = raw_input('How many chips do you wager? (min 1, max %d): ' % player['chips'])
            player['round'] += 1
            result = play_blackjack(player)
            player[result] += 1
| mit | 2,510,335,775,484,046,300 | 40.886364 | 146 | 0.558871 | false |
chakki-works/arXivTimesIndicator | main.py | 1 | 1791 | import os
from PIL import Image
from arxivtimes_indicator.data.github import filter_issue_by_ym, fetch_issues, get_icon_url, tally_by_labels, tally_by_users
from arxivtimes_indicator.data.twitter import fetch_tweets, rank_paper
from arxivtimes_indicator.data.utils import download, break_line, std_score
from arxivtimes_indicator.visualization.visualize import save_bar_graph, save_graph_with_icon, save_text_graph
TEMPORARY = 'data'
REPORT = 'reports'
def fetch_images(user_names, issues):
images_urls = [get_icon_url(user_name, issues) for user_name in user_names]
image_paths = [os.path.join(TEMPORARY, '{}.png'.format(name)) for name in user_names]
[download(url, path) for url, path in zip(images_urls, image_paths)]
images = [Image.open(p) for p in image_paths]
return images
def main():
# Fetch Issues
issues = fetch_issues()
# Process Issues
filtered_issues = filter_issue_by_ym(issues)
label_names, label_counts = tally_by_labels(filtered_issues)
user_names, user_counts = tally_by_users(filtered_issues)
images = fetch_images(user_names, issues)
# Save label and user graph
label_fig_path = os.path.join(REPORT, 'labels.png')
users_fig_path = os.path.join(REPORT, 'users.png')
label_names = break_line(label_names)
save_bar_graph(label_names, label_counts, label_fig_path)
save_graph_with_icon(list(range(len(user_names))), user_counts, images, users_fig_path)
# Fetch tweets
tweets = fetch_tweets()
# Process tweets
n = 10 # number of top papers
scores, titles = rank_paper(tweets)
scores, titles = scores[:n], titles[:n]
# Save paper rank graph
path = os.path.join(REPORT, 'rank.png')
save_text_graph(titles, scores, path)
if __name__ == '__main__':
main() | apache-2.0 | -7,062,447,111,258,028,000 | 35.571429 | 124 | 0.698492 | false |
wkew/FTMSVisualization | 3-HeteroClassPlotter.py | 1 | 10441 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 11:42:36 2016
@author: Will Kew
[email protected]
Copyright Will Kew, 2016
This file is part of FTMS Visualisation (also known as i-van Krevelen).
FTMS Visualisation is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FTMS Visualisation is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FTMS Visualisation. If not, see <http://www.gnu.org/licenses/>.
This script will read in an assigned peaklist (example input file included) and calculate the heteroatomic class distribution.
The output is a vbar plot of heteroatomic class versus count. You can also have the calculated numbers output in a format for replotting.
This tool uses Seaborn - http://seaborn.pydata.org/
A number of (partially tested) other functions to plot output are included, though commented out.
This tool was used in our recent paper on Scotch Whisky - https://link.springer.com/article/10.1007/s13361-016-1513-y
The prompt for the user about whisky samples is thus borne from this - it also serves as an example of how to customise which classes to include.
"""
from __future__ import print_function # Python 2 compatibility
from __future__ import absolute_import # Python 2 compatibility
import os, sys
import pandas as pd
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
"""
# We import also the FTMSVizProcessingModule which contains a few useful functions.
# here we define where the scripts are stored.
# Make sure to change this to where you have saved these scripts.
"""
try: #test if running in ipython
__IPYTHON__
except NameError: #if not running in ipython....
import FTMSVizProcessingModule as FTPM
    path = os.path.join(os.getcwd(), "data") + os.sep #example data location
else: #if running in ipython
scriptlocation = "/LOCAL/FTMSVis/FTMSVisualization-master/"
sys.path.append(scriptlocation)
import FTMSVizProcessingModule as FTPM
path = "/LOCAL/FTMSVis/data/"
whisky = input("Are these Whisky samples - Y or N?" )
if whisky.upper() == "Y":
whisky = True
else:
whisky = False
inputpath = path +"OutputCSV/"
outputpath = path + "Images/Classes/"
FTPM.make_sure_path_exists(outputpath) #this function checks the output directory exists; if it doesnt, it creates it.
print("Looking for CSVs in " + inputpath)
filesA = os.listdir(inputpath)
filesB = []
for y in filesA:
if y[-8:] =="hits.csv" and y[-10:] != "nohits.csv" and y[-11:] !="isohits.csv":
filesB.append(y)
nfiles = len(filesB)
samplenames=[]
for x in filesB:
samplenames.append(x[:-9])
heteroclasses=[]
for z in filesB:
df1 = pd.read_csv(inputpath+z,index_col=0)
hetclas = df1["HeteroClass"]
hetclaslist = hetclas.tolist()
heteroclasses.append(hetclaslist)
heteroclasses = [item for sublist in heteroclasses for item in sublist]
hetclasset = list(set(heteroclasses))
indexlist = []
for i in samplenames:
for n in range(len(hetclasset)):
indexlist.append(i)
###This section is relevant to my whisky samples
if whisky == True:
columnnames = ["Sample","Class","WoodType","Region","Age","Peated","HeteroClass","HeteroClassCount"]
df4 = pd.read_csv(path+"SampleInfo-Dict.csv",index_col=0)
df4 = df4.T
dict4 = df4.to_dict()
outputdata = pd.DataFrame(index = range(len(indexlist)), columns=columnnames)
a = 0
for y in filesB:
df2 = pd.read_csv(inputpath+y,index_col=0)
counter = Counter(df2["HeteroClass"])
for x in counter:
outputdata.iloc[a][0] = y[:-9]
outputdata.iloc[a][1] = dict4[y[:-9]]["Class"]
outputdata.iloc[a][2] = dict4[y[:-9]]["Total Wood"]
outputdata.iloc[a][3] = dict4[y[:-9]]["Region"]
outputdata.iloc[a][4] = dict4[y[:-9]]["Age"]
outputdata.iloc[a][5] = dict4[y[:-9]]["Peated"]
outputdata.iloc[a][6] = x
outputdata.iloc[a][7] = counter[x]
a = a+1
outputdata = outputdata.dropna(how="all",axis=0)
else:
columnnames = ["Sample","Class","HeteroClass","HeteroClassCount"]
outputdata = pd.DataFrame(index = range(len(indexlist)), columns=columnnames)
a = 0
for y in filesB:
df2 = pd.read_csv(inputpath+y,index_col=0)
counter = Counter(df2["HeteroClass"])
for x in counter:
outputdata.iloc[a][0] = y[:-9]
            outputdata.iloc[a][1] = y[:-9] #this is the Class variable, and should be defined as appropriate for what you're plotting. In the case of single samples, it can be the sample name.
outputdata.iloc[a][2] = x
outputdata.iloc[a][3] = counter[x]
a = a+1
outputdata = outputdata.dropna(how="all",axis=0)
pd.to_numeric(outputdata["HeteroClassCount"],errors="raise")
saveoutputdata = input("Do you want to save the output data in a text file for later re-processing - Y or N? ")
if saveoutputdata.upper() == "Y":
outputdata.to_excel(inputpath+"HetClassByClass-longform.xlsx") #this saves the info out in a longform for plotting.
#outputdata = pd.read_excel(inputpath+"HetClassByClass-longform.xlsx") #this reads that data back in. Only necessary for manually re-running bits of script.
# This section creates a unique, naturally sorted list of heteroatom classes for plotting. Only really works for CHO formula.
# If you have exotic heteroatoms, will need to refigure this yourself, or just hardcode the order you want. easy to do in Excel.
order = outputdata["HeteroClass"].tolist()
order= list(set(order))
order.sort(key=FTPM.natural_sort_key) # this natural sort function ensures a logical order to your barplot.
if whisky == True:
CHOorder = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19"]
Fullorder = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18",
"O19","O1S1","O2S1","O3S1","O4S1","O5S1","O6S1","O7S1","O8S1","O9S1","O10S1","O11S1","O12S1"]
CHOSorder =["O1S1","O2S1","O3S1","O4S1","O5S1","O6S1","O7S1","O8S1","O9S1","O10S1","O11S1","O12S1"]
CHOSorderNew = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19","OnS"]
labels = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19",r'O$\mathregular {_n}$S']
else:
df = outputdata
#colours = ["#a6cee3","#1f78b4","#b2df8a"] #colorblind and print friendly colours picked from http://colorbrewer2.org/
colours = ["#1b9e77","#d95f02","#7570b3"] #as above, but brighter
def barplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.barplot(x="HeteroClass",y="HeteroClassCount",hue="Class",
data=outputdata,order=order,palette=sns.color_palette(colours))
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
handles, labels = ax.get_legend_handles_labels()
if len(labels) == 1:
ax.legend_.remove()
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"Barplot.png",dpi=600,bbox_inches="tight")
fig.savefig(outputpath+"Barplot.eps",dpi=600,bbox_inches="tight")
barplot() #plots a barplot.
"""
# Here are some further examples of the Seaborn Plotting library applied to this problem.
# Most of these rely on having many samples across a small number of classes you wish to compare
def violinplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.violinplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,
order=order,
palette=sns.color_palette("bright"),
split=False,bw="silverman",scale_hue=True,scale="width",
cut=2,linewidth=1.5,inner="quartiles",saturation=1)
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
sns.despine()
fig = ax.get_figure()
locs, labels = plt.xticks()
plt.xticks(locs, labels, rotation=90)
cur_ylim = ax.get_ylim()
ax.set_ylim(0,cur_ylim[1])
fig.set_size_inches((POPM.mm2inch(171,80)), forward=True)
fig.savefig(outputpath+"violinplot-scalewidth.png",dpi=600,bbox_inches="tight")
fig.savefig(outputpath+"violinplot-scalewidth.eps",dpi=600,bbox_inches="tight")
def boxplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.boxplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"))
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"Boxplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
def swarmplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.swarmplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"))
ax.set(xlabel='Heteroatomic Class', ylabel='Average Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"swarmplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
def stripplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.stripplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"),jitter=False,split=True)
ax.set(xlabel='Heteroatomic Class', ylabel='Average Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"striplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
"""
#EOF | gpl-3.0 | -3,138,422,466,994,427,400 | 42.690377 | 193 | 0.666507 | false |
Parkayun/flask | flask/debughelpers.py | 1 | 6024 | # -*- coding: utf-8 -*-
"""
flask.debughelpers
~~~~~~~~~~~~~~~~~~
Various helpers to make the development experience better.
:copyright: (c) 2016 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from ._compat import implements_to_string, text_type
from .app import Flask
from .blueprints import Blueprint
from .globals import _request_ctx_stack
class UnexpectedUnicodeError(AssertionError, UnicodeError):
"""Raised in places where we want some better error reporting for
unexpected unicode or binary data.
"""
@implements_to_string
class DebugFilesKeyError(KeyError, AssertionError):
"""Raised from request.files during debugging. The idea is that it can
provide a better error message than just a generic KeyError/BadRequest.
"""
def __init__(self, request, key):
form_matches = request.form.getlist(key)
buf = ['You tried to access the file "%s" in the request.files '
'dictionary but it does not exist. The mimetype for the request '
'is "%s" instead of "multipart/form-data" which means that no '
'file contents were transmitted. To fix this error you should '
'provide enctype="multipart/form-data" in your form.' %
(key, request.mimetype)]
if form_matches:
buf.append('\n\nThe browser instead transmitted some file names. '
'This was submitted: %s' % ', '.join('"%s"' % x
for x in form_matches))
self.msg = ''.join(buf)
def __str__(self):
return self.msg
class FormDataRoutingRedirect(AssertionError):
"""This exception is raised by Flask in debug mode if it detects a
redirect caused by the routing system when the request method is not
GET, HEAD or OPTIONS. Reasoning: form data will be dropped.
"""
def __init__(self, request):
exc = request.routing_exception
buf = ['A request was sent to this URL (%s) but a redirect was '
'issued automatically by the routing system to "%s".'
% (request.url, exc.new_url)]
# In case just a slash was appended we can be extra helpful
if request.base_url + '/' == exc.new_url.split('?')[0]:
buf.append(' The URL was defined with a trailing slash so '
'Flask will automatically redirect to the URL '
'with the trailing slash if it was accessed '
'without one.')
buf.append(' Make sure to directly send your %s-request to this URL '
'since we can\'t make browsers or HTTP clients redirect '
'with form data reliably or without user interaction.' %
request.method)
buf.append('\n\nNote: this exception is only raised in debug mode')
AssertionError.__init__(self, ''.join(buf).encode('utf-8'))
def attach_enctype_error_multidict(request):
"""Since Flask 0.8 we're monkeypatching the files object in case a
request is detected that does not use multipart form data but the files
object is accessed.
"""
oldcls = request.files.__class__
class newcls(oldcls):
def __getitem__(self, key):
try:
return oldcls.__getitem__(self, key)
except KeyError:
if key not in request.form:
raise
raise DebugFilesKeyError(request, key)
newcls.__name__ = oldcls.__name__
newcls.__module__ = oldcls.__module__
request.files.__class__ = newcls
def _dump_loader_info(loader):
yield 'class: %s.%s' % (type(loader).__module__, type(loader).__name__)
for key, value in sorted(loader.__dict__.items()):
if key.startswith('_'):
continue
if isinstance(value, (tuple, list)):
if not all(isinstance(x, (str, text_type)) for x in value):
continue
yield '%s:' % key
for item in value:
yield ' - %s' % item
continue
elif not isinstance(value, (str, text_type, int, float, bool)):
continue
yield '%s: %r' % (key, value)
def explain_template_loading_attempts(app, template, attempts):
"""This should help developers understand what failed"""
info = ['Locating template "%s":' % template]
total_found = 0
blueprint = None
reqctx = _request_ctx_stack.top
if reqctx is not None and reqctx.request.blueprint is not None:
blueprint = reqctx.request.blueprint
for idx, (loader, srcobj, triple) in enumerate(attempts):
if isinstance(srcobj, Flask):
src_info = 'application "%s"' % srcobj.import_name
elif isinstance(srcobj, Blueprint):
src_info = 'blueprint "%s" (%s)' % (srcobj.name,
srcobj.import_name)
else:
src_info = repr(srcobj)
info.append('% 5d: trying loader of %s' % (
idx + 1, src_info))
for line in _dump_loader_info(loader):
info.append(' %s' % line)
if triple is None:
detail = 'no match'
else:
detail = 'found (%r)' % (triple[1] or '<string>')
total_found += 1
info.append(' -> %s' % detail)
seems_fishy = False
if total_found == 0:
info.append('Error: the template could not be found.')
seems_fishy = True
elif total_found > 1:
info.append('Warning: multiple loaders returned a match for the template.')
seems_fishy = True
if blueprint is not None and seems_fishy:
info.append(' The template was looked up from an endpoint that '
'belongs to the blueprint "%s".' % blueprint)
info.append(' Maybe you did not place a template in the right folder?')
info.append(' See http://flask.pocoo.org/docs/blueprints/#templates')
app.logger.info('\n'.join(info))
| bsd-3-clause | -7,867,772,259,524,209,000 | 37.864516 | 83 | 0.586819 | false |
stpx/canto-curses | canto_curses/main.py | 1 | 9578 | # -*- coding: utf-8 -*-
#Canto-curses - ncurses RSS reader
# Copyright (C) 2014 Jack Miller <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
CANTO_PROTOCOL_COMPATIBLE = 0.9
from canto_next.client import CantoClient
from canto_next.plugins import try_plugins, set_program
from canto_next.rwlock import alllocks
from canto_next.hooks import call_hook
from .config import config, finalize_eval_settings
from .tagcore import tag_updater, alltagcores
from .gui import CantoCursesGui, GraphicalLog
# VERSION and GIT_HASH are referenced below when building the version string;
# the import location is an assumption (they are not defined in this file).
from .version import VERSION, GIT_HASH
from threading import Thread
from queue import Queue
import logging
logging.basicConfig(
format = "%(asctime)s : %(name)s -> %(message)s",
datefmt = "%H:%M:%S",
level = logging.INFO
)
log = logging.getLogger("CANTO-CURSES")
import traceback
import locale
import getopt
import signal
import errno
import fcntl
import time
import sys
import os
# It's the CantoCurses class' responsibility to provide the subsequent Gui
# object with a solid foundation with other components. This includes parsing
# command line arguments, starting a canto-daemon instance if necessary, signal
# handling, and wrapping the socket communication.
class CantoCurses(CantoClient):
def init(self):
# For good curses behavior.
locale.setlocale(locale.LC_ALL, '')
# Used for GUI-signalled death.
self.pid = os.getpid()
self.done = False
# Whether or not to append pid to logfile
# (debug option)
self.log_fname_pid = False
version = "canto-curses " + VERSION + " " + GIT_HASH
optl = self.common_args('hl', ["help"], version)
if optl == -1:
sys.exit(-1)
if self.args(optl):
sys.exit(-1)
rootlog = logging.getLogger()
rootlog.setLevel(max(rootlog.level - 10 * self.verbosity,0))
self.glog_handler = GraphicalLog()
try:
if self.port < 0:
# If we're running locally, ensure daemon is running
self.start_daemon()
CantoClient.__init__(self, self.socket_path)
else:
CantoClient.__init__(self, None,\
port = self.port, address = self.addr)
except Exception as e:
log.error("Error: %s" % e)
sys.exit(-1)
# __init__ above started one connection, start another
# for priority stuff.
self.connect()
# Make sure we have permissions on the relevant, non-daemon files in
# the target directory (None of these will be used until we set_log)
if self.ensure_paths():
sys.exit(-1)
self.set_log()
log.info(version)
# Evaluate anything in the target /plugins directory.
set_program("canto-curses")
self.plugin_errors = try_plugins(self.conf_dir, self.plugin_default, self.disabled_plugins,
self.enabled_plugins)
def print_help(self):
print("USAGE: canto-curses [options]")
print("\t-h/--help\tThis help")
print("\t-V/--version\tPrint version")
print("\t-v/\t\tVerbose logging (for debug)")
print("\t-D/--dir <dir>\tSet configuration directory.")
print("\t-l\t\tAppend pid to log file name")
print("\nPlugin control\n")
print("\t--noplugins\t\t\t\tDisable plugins")
print("\t--enableplugins 'plugin1 plugin2...'\tEnable single plugins (overrides --noplugins)")
print("\t--disableplugins 'plugin1 plugin2...'\tDisable single plugins")
print("\nNetwork control\n")
print("NOTE: These should be used in conjunction with SSH port forwarding to be secure\n")
print("\t-a/--address <IP>\tConnect to this address")
print("\t-p/--port <port>\tConnect to this port")
def args(self, optlist):
for opt, arg in optlist:
if opt in ["-h", "--help"]:
self.print_help()
return 1
elif opt in ["-l"]:
self.log_fname_pid = True
return 0
def winch(self, a = None, b = None):
if self.gui.alive:
self.gui.winch()
def sigusr1(self, a = None, b = None):
import threading
held_locks = {}
code = {}
curthreads = threading.enumerate()
for threadId, stack in sys._current_frames().items():
name = str(threadId)
for ct in curthreads:
if ct.ident == threadId:
name = ct.name
code[name] = ["NAME: %s" % name]
for filename, lineno, fname, line in traceback.extract_stack(stack):
code[name].append('FILE: "%s", line %d, in %s' % (filename, lineno, fname))
if line:
code[name].append(" %s" % (line.strip()))
held_locks[name] = ""
for lock in alllocks:
if lock.writer_id == threadId:
held_locks[name] += ("%s(w)" % lock.name)
continue
for reader_id, reader_stack in lock.reader_stacks:
if reader_id == threadId:
held_locks[name] += ("%s(r)" % lock.name)
for k in code:
log.info('\n\nLOCKS: %s \n%s' % (held_locks[k], '\n'.join(code[k])))
log.info("\n\nSTACKS:")
for lock in alllocks:
for (reader_id, reader_stack) in lock.reader_stacks:
log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
log.info("Lock reader (thread %s):" % (reader_id,))
log.info(''.join(reader_stack))
for writer_stack in lock.writer_stacks:
log.info("Lock %s (%s readers)" % (lock.name, lock.readers))
log.info("Lock writer (thread %s):" % (lock.writer_id,))
log.info(''.join(writer_stack))
log.info("VARS: %s" % config.vars)
log.info("OPTS: %s" % config.config)
def child(self, a = None, b = None):
try:
while True:
pid, status = os.waitpid(-1, os.WNOHANG)
if pid == 0:
break
log.debug("CHLD %d has died: %d", pid, status)
except Exception as e:
if e.errno == errno.ECHILD:
log.debug("CHLD no children?")
else:
raise
def run(self):
# We want this as early as possible
signal.signal(signal.SIGUSR1, self.sigusr1)
# Get config from daemon
if not config.init(self, CANTO_PROTOCOL_COMPATIBLE):
print("Invalid daemon version")
print("Wanted: %s" % CANTO_PROTOCOL_COMPATIBLE)
print("Got: %s" % config.version)
sys.exit(-1)
else:
log.info("Version check passed: %s" % CANTO_PROTOCOL_COMPATIBLE)
# Create Tags for each TagCore
self.gui = CantoCursesGui(self, self.glog_handler)
tag_updater.init(self)
# Initial signal setup.
signal.signal(signal.SIGWINCH, self.winch)
signal.signal(signal.SIGCHLD, self.child)
finalize_eval_settings()
call_hook("curses_start", [])
if self.plugin_errors:
log.error("The following error occurred loading plugins:\n\n%s" % self.plugin_errors)
while self.gui.alive:
self.gui.tick()
time.sleep(1)
def ensure_paths(self):
if os.path.exists(self.conf_dir):
if not os.path.isdir(self.conf_dir):
log.error("Error: %s is not a directory." % self.conf_dir)
return -1
if not os.access(self.conf_dir, os.R_OK):
log.error("Error: %s is not readable." % self.conf_dir)
return -1
if not os.access(self.conf_dir, os.W_OK):
log.error("Error: %s is not writable." % self.conf_dir)
return -1
else:
try:
os.makedirs(self.conf_dir)
except Exception as e:
log.error("Exception making %s : %s" % (self.conf_dir, e))
return -1
return self.ensure_files()
def ensure_files(self):
logname = "curses-log"
if self.log_fname_pid:
logname += ".%d" % os.getpid()
for f in [ logname ] :
p = self.conf_dir + "/" + f
if os.path.exists(p):
if not os.path.isfile(p):
log.error("Error: %s is not a file." % p)
return -1
if not os.access(p, os.R_OK):
log.error("Error: %s is not readable." % p)
return -1
if not os.access(p, os.W_OK):
log.error("Error: %s is not writable." % p)
return -1
self.log_path = self.conf_dir + "/" + logname
def set_log(self):
f = open(self.log_path, "w")
os.dup2(f.fileno(), sys.stderr.fileno())
def start(self):
try:
self.init()
self.run()
except KeyboardInterrupt:
pass
except Exception as e:
tb = traceback.format_exc()
log.error("Exiting on exception:")
log.error("\n" + "".join(tb))
call_hook("curses_exit", [])
log.info("Exiting.")
sys.exit(0)
def __init__(self):
self.start()
| gpl-2.0 | -2,279,344,980,120,224,800 | 32.256944 | 102 | 0.546774 | false |
jgmanzanas/CMNT_004_15 | project-addons/sale_advance_payment/__openerp__.py | 1 | 1634 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Comunitea Servicios Tecnológicos All Rights Reserved
# $Omar Castiñeira Saaevdra <[email protected]>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Sale Advance Payment",
"version": "1.0",
"author": "Comunitea",
'website': 'www.counitea.com',
"category": "Sales",
"description": """Allow to add advance payments on sales and then use its
on invoices""",
"depends": ["base", "sale", "account_voucher",
"partner_risk__stock_reserve__rel"],
"data": ["wizard/sale_advance_payment_wzd_view.xml",
"sale_view.xml",
"wizard/apply_on_account_amount_view.xml",
"invoice_view.xml",
"partner_view.xml",
"security/ir.model.access.csv"],
"installable": True,
}
| agpl-3.0 | 1,341,334,413,204,334,000 | 40.846154 | 78 | 0.590074 | false |
voytekresearch/neurodsp | neurodsp/tests/aperiodic/test_irasa.py | 1 | 1456 | """Tests for IRASA functions."""
import numpy as np
from neurodsp.tests.settings import FS, N_SECONDS_LONG, EXP1
from neurodsp.sim import sim_combined
from neurodsp.spectral import compute_spectrum, trim_spectrum
from neurodsp.aperiodic.irasa import *
###################################################################################################
###################################################################################################
def test_compute_irasa(tsig_comb):
# Estimate periodic and aperiodic components with IRASA
f_range = [1, 30]
freqs, psd_ap, psd_pe = compute_irasa(tsig_comb, FS, f_range, noverlap=int(2*FS))
assert len(freqs) == len(psd_ap) == len(psd_pe)
# Compute r-squared for the full model, comparing to a standard power spectrum
_, powers = trim_spectrum(*compute_spectrum(tsig_comb, FS, nperseg=int(4*FS)), f_range)
r_sq = np.corrcoef(np.array([powers, psd_ap+psd_pe]))[0][1]
assert r_sq > .95
def test_fit_irasa(tsig_comb):
# Estimate periodic and aperiodic components with IRASA & fit aperiodic
freqs, psd_ap, _ = compute_irasa(tsig_comb, FS, noverlap=int(2*FS))
b0, b1 = fit_irasa(freqs, psd_ap)
assert round(b1) == EXP1
assert np.abs(b0 - np.log10((psd_ap)[0])) < 1
def test_fit_func():
freqs = np.arange(30)
intercept = -2
slope = -2
fit = fit_func(freqs, intercept, slope)
assert (fit == slope * freqs + intercept).all()
| mit | 5,145,470,417,897,858,000 | 32.090909 | 99 | 0.581044 | false |
tkrajina/cartesius | cartesius/colors.py | 1 | 1051 | # -*- coding: utf-8 -*-
""" Utility functions folr colors """
def get_color(color):
""" Can convert from integer to (r, g, b) """
if not color:
return None
if isinstance(color, int):
temp = color
blue = temp % 256
temp = int(temp / 256)
green = temp % 256
temp = int(temp / 256)
red = temp % 256
return (red, green, blue)
if not len(color) == 3:
raise Exception('Invalid color {0}'.format(color))
return color
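# Worked example (illustrative): an integer colour is decoded byte by byte into
# (red, green, blue), so get_color(0xFF8800) == (255, 136, 0), while a 3-tuple
# such as (12, 34, 56) is returned unchanged.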
def brighten(color, n):
return (int((color[0] + n) % 256), int((color[1] + n) % 256), int((color[2] + n) % 256))
def darken(color, n):
return brighten(color, -n)
def get_color_between(color1, color2, i):
""" i is a number between 0 and 1, if 0 then color1, if 1 color2, ... """
if i <= 0:
return color1
if i >= 1:
return color2
return (int(color1[0] + (color2[0] - color1[0]) * i),
int(color1[1] + (color2[1] - color1[1]) * i),
int(color1[2] + (color2[2] - color1[2]) * i))
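# Worked example (illustrative): halfway between black and white,
# get_color_between((0, 0, 0), (255, 255, 255), 0.5) == (127, 127, 127),
# since each channel is truncated by int() after interpolation.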
| apache-2.0 | 6,686,300,613,034,056,000 | 24.634146 | 92 | 0.527117 | false |
ehooo/django_mqtt | test_web/settings.py | 1 | 4373 | """
Django settings for web project.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#b68qv#(v-g26k3qt_-1ufg-prvsw2p)7@ctea*n!36-w23bv1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DB_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'django_mqtt',
'django_mqtt.mosquitto.auth_plugin',
'django_mqtt.publisher',
]
FIXTURE_DIRS = [
os.path.join(BASE_DIR, 'test_web', 'fixtures')
]
MIDDLEWARE = (
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_web.urls'
MQTT_CERTS_ROOT = os.path.join(BASE_DIR, 'private')
MQTT_ACL_ALLOW = False
MQTT_ACL_ALLOW_ANONIMOUS = MQTT_ACL_ALLOW
MQTT_ALLOW_EMPTY_CLIENT_ID = False
MQTT_SESSION_TIMEOUT = 5
WSGI_APPLICATION = 'test_web.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:' if DB_DEBUG else os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
LOGGING_LEVEL = 'DEBUG' if DEBUG else 'INFO'
if 'test' in sys.argv:
LOGGING_LEVEL = 'CRITICAL'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': LOGGING_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'django': {
'handlers': ['console'],
'propagate': True,
'filters': ['require_debug_true']
}
}
}
| gpl-2.0 | 6,900,691,633,989,705,000 | 25.664634 | 95 | 0.641207 | false |
matllubos/django-reversion-log | setup.py | 1 | 1150 | from setuptools import setup, find_packages
from reversion_log.version import get_version
setup(
name='django-reversion-log',
version=get_version(),
description="Log build on revisiions.",
keywords='django, reversion',
author='Lubos Matl',
author_email='[email protected]',
url='https://github.com/matllubos/django-reversion-log',
license='LGPL',
package_dir={'is_core': 'is_core'},
include_package_data=True,
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU LESSER GENERAL PUBLIC LICENSE (LGPL)',
'Natural Language :: Czech',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Site Management',
],
install_requires=[
'django>=1.6',
'django-reversion==1.8.7',
],
zip_safe=False
)
| lgpl-3.0 | -1,228,347,683,448,355,300 | 31.857143 | 78 | 0.618261 | false |
MissionCriticalCloud/cosmic | cosmic-core/systemvm/patches/centos7/opt/cosmic/startup/setup_cpvm.py | 1 | 2608 | import logging
import os
from utils import Utils
def setup_iptable_rules(cmdline):
external_rules = ""
for cidr in cmdline.get('allowedcidrs', '').split(','):
if cidr != '':
external_rules += "-A INPUT -i " + cmdline['publicnic'] + " -s " + cidr.strip() + " -p tcp -m multiport --dports 80,443 -m tcp -j ACCEPT\n"
iptables_rules = """
*nat
:PREROUTING ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
COMMIT
*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -i lo -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i %s -m state --state RELATED,ESTABLISHED -j ACCEPT
-A INPUT -p icmp --icmp-type 13 -j DROP
-A INPUT -p icmp -j ACCEPT
-A INPUT -i %s -p tcp -m state --state NEW -m tcp -s 169.254.0.1/32 --dport 3922 -j ACCEPT
-A INPUT -i %s -p tcp -m state --state NEW -m tcp --dport 8001 -j ACCEPT
-A INPUT -i %s -p tcp -m state --state NEW -m tcp --dport 8001 -j ACCEPT
%s
COMMIT
""" % (
cmdline['controlnic'],
cmdline['mgtnic'],
cmdline['publicnic'],
cmdline['controlnic'],
cmdline['controlnic'],
cmdline['mgtnic'],
external_rules
)
with open("/tmp/iptables-consoleproxy", "w") as f:
f.write(iptables_rules)
os.system("iptables-restore < /tmp/iptables-consoleproxy")
class ConsoleProxyVM:
def __init__(self, cmdline) -> None:
super().__init__()
self.cmdline = cmdline
self.config_dir = "/etc/cosmic/agent/"
def start(self):
logging.info("Setting up configuration for %s" % self.cmdline["type"])
self.setup_agent_config()
setup_iptable_rules(self.cmdline)
if self.cmdline['setrfc1918routes'] == 'true':
logging.info("Setting rfc1918 routes")
Utils(self.cmdline).set_rfc1918_routes()
logging.info("Setting local routes")
Utils(self.cmdline).set_local_routes()
os.system("systemctl start cosmic-agent")
def setup_agent_config(self):
if not os.path.isdir(self.config_dir):
os.makedirs(self.config_dir, 0o644, True)
consoleproxy_properties = """
consoleproxy.tcpListenPort=0
consoleproxy.httpListenPort=80
consoleproxy.httpCmdListenPort=8001
consoleproxy.jarDir=./applet/
consoleproxy.viewerLinger=180
consoleproxy.reconnectMaxRetry=5
"""
with open(self.config_dir + "consoleproxy.properties", "w") as f:
f.write(consoleproxy_properties)
Utils(self.cmdline).setup_agent_properties()
| apache-2.0 | -2,954,845,786,210,160,600 | 28.977011 | 151 | 0.635353 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/pyshared/orca/structural_navigation.py | 1 | 153933 | # Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Implements structural navigation. Right now this is only
being implemented by Gecko; however it can be used in any
script providing access to document content."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc."
__license__ = "LGPL"
import pyatspi
import debug
import input_event
import keybindings
import orca
import orca_state
import settings
import speech
from orca_i18n import _
from orca_i18n import ngettext
from orca_i18n import C_
#############################################################################
# #
# MatchCriteria #
# #
#############################################################################
class MatchCriteria:
"""Contains the criteria which will be used to generate a collection
matchRule. We don't want to create the rule until we need it and
are ready to use it. In addition, the creation of an AT-SPI match
rule requires you specify quite a few things (see the __init__),
most of which are irrelevant to the search at hand. This class
makes it possible for the StructuralNavigationObject creator to just
specify the few criteria that actually matter.
"""
def __init__(self,
collection,
states = [],
matchStates = None,
objAttrs = [],
matchObjAttrs = None,
roles = [],
matchRoles = None,
interfaces = "",
matchInterfaces = None,
invert = False,
applyPredicate = False):
"""Creates a new match criteria object.
Arguments:
- collection: the collection interface for the document in
which the accessible objects can be found.
- states: a list of pyatspi states of interest
- matchStates: whether an object must have all of the states
in the states list, any of the states in the list, or none
of the states in the list. Must be one of the collection
interface MatchTypes if provided.
- objAttrs: a list of object attributes (not text attributes)
- matchObjAttrs: whether an object must have all of the
attributes in the objAttrs list, any of the attributes in
the list, or none of the attributes in the list. Must be
one of the collection interface MatchTypes if provided.
- interfaces: (We aren't using this. According to the at-spi
idl, it is a string.)
- matchInterfaces: The collection MatchType for matching by
interface.
- invert: If true the match rule will find objects that don't
match. We always use False.
- applyPredicate: whether or not a predicate should be applied
as an additional check to see if an item is indeed a match.
This is necessary, for instance, when one of the things we
care about is a text attribute, something the collection
interface doesn't include in its criteria.
"""
self.collection = collection
self.matchStates = matchStates or collection.MATCH_ANY
self.objAttrs = objAttrs
self.matchObjAttrs = matchObjAttrs or collection.MATCH_ANY
self.roles = roles
self.matchRoles = matchRoles or collection.MATCH_ANY
self.interfaces = interfaces
self.matchInterfaces = matchInterfaces or collection.MATCH_ALL
self.invert = invert
self.applyPredicate = applyPredicate
self.states = pyatspi.StateSet()
for state in states:
self.states.add(state)
###########################################################################
# #
# StructuralNavigationObject #
# #
###########################################################################
class StructuralNavigationObject:
"""Represents a document object which has identifiable characteristics
which can be used for the purpose of navigation to and among instances
of that object. These characteristics may be something as simple as a
role and/or a state of interest. Or they may be something more complex
such as character counts, text attributes, and other object attributes.
"""
def __init__(self, structuralNavigation, objType, bindings, predicate,
criteria, presentation):
"""Creates a new structural navigation object.
Arguments:
- structuralNavigation: the StructuralNavigation class associated
with this object.
- objType: the type (e.g. BLOCKQUOTE) associated with this object.
- bindings: a dictionary of all of the possible bindings for this
object. In the case of all but the "atLevel" bindings, each
binding takes the form of [keysymstring, modifiers, description].
The goPreviousAtLevel and goNextAtLevel bindings are each a list
of bindings in that form.
- predicate: the predicate to use to determine if a given accessible
matches this structural navigation object. Used when a search via
collection is not possible or practical.
- criteria: a method which returns a MatchCriteria object which
can in turn be used to locate the next/previous matching accessible
via collection.
- presentation: the method which should be called after performing
the search for the structural navigation object.
"""
self.structuralNavigation = structuralNavigation
self.objType = objType
self.bindings = bindings
self.predicate = predicate
self.criteria = criteria
self.present = presentation
self.inputEventHandlers = {}
self.keyBindings = keybindings.KeyBindings()
self.functions = []
self._setUpHandlersAndBindings()
def _setUpHandlersAndBindings(self):
"""Adds the inputEventHandlers and keyBindings for this object."""
# Set up the basic handlers. These are our traditional goPrevious
# and goNext functions.
#
previousBinding = self.bindings.get("previous")
if previousBinding:
[keysymstring, modifiers, description] = previousBinding
handlerName = "%sGoPrevious" % self.objType
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(self.goPrevious, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(self.goPrevious)
nextBinding = self.bindings.get("next")
if nextBinding:
[keysymstring, modifiers, description] = nextBinding
handlerName = "%sGoNext" % self.objType
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(self.goNext, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(self.goNext)
# Set up the "at level" handlers (e.g. to navigate among headings
# at the specified level).
#
previousAtLevel = self.bindings.get("previousAtLevel") or []
for i, binding in enumerate(previousAtLevel):
level = i + 1
handler = self.goPreviousAtLevelFactory(level)
handlerName = "%sGoPreviousLevel%dHandler" % (self.objType, level)
keysymstring, modifiers, description = binding
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(handler, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(handler)
nextAtLevel = self.bindings.get("nextAtLevel") or []
for i, binding in enumerate(nextAtLevel):
level = i + 1
handler = self.goNextAtLevelFactory(level)
handlerName = "%sGoNextLevel%dHandler" % (self.objType, level)
keysymstring, modifiers, description = binding
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(handler, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(handler)
# Set up the "directional" handlers (e.g. for table cells. Live
# region support has a handler to go to the last live region,
# so we'll handle that here as well).
#
directions = {}
directions["Left"] = self.bindings.get("left")
directions["Right"] = self.bindings.get("right")
directions["Up"] = self.bindings.get("up")
directions["Down"] = self.bindings.get("down")
directions["First"] = self.bindings.get("first")
directions["Last"] = self.bindings.get("last")
for direction in directions:
binding = directions.get(direction)
if not binding:
continue
handler = self.goDirectionFactory(direction)
handlerName = "%sGo%s" % (self.objType, direction)
keysymstring, modifiers, description = binding
self.inputEventHandlers[handlerName] = \
input_event.InputEventHandler(handler, description)
self.keyBindings.add(
keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
self.inputEventHandlers[handlerName]))
self.functions.append(handler)
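    # For reference (illustrative only; compare _blockquoteBindings() and the
    # other binding methods in the Objects section of the StructuralNavigation
    # class below), the bindings dictionary consumed above takes roughly this
    # shape:
    #
    #   bindings = {
    #       "previous": ["q", settings.SHIFT_MODIFIER_MASK,
    #                    "Goes to previous blockquote."],
    #       "next": ["q", settings.NO_MODIFIER_MASK,
    #                "Goes to next blockquote."],
    #       # "previousAtLevel"/"nextAtLevel" hold one [key, modifiers,
    #       # description] entry per level; "left", "right", "up", "down",
    #       # "first" and "last" use the same single-binding form.
    #   }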
def addHandlerAndBinding(self, binding, handlerName, function):
"""Adds a custom inputEventHandler and keybinding to the object's
handlers and bindings. Right now this is unused, but here in
case a creator of a StructuralNavigationObject had some other
desired functionality in mind.
Arguments:
- binding: [keysymstring, modifiers, description]
- handlerName: a string uniquely identifying the handler
- function: the function associated with the binding
"""
[keysymstring, modifiers, description] = binding
handler = input_event.InputEventHandler(function, description)
keyBinding = keybindings.KeyBinding(
keysymstring,
settings.defaultModifierMask,
modifiers,
handler)
self.inputEventHandlers[handlerName] = handler
self.structuralNavigation.inputEventHandlers[handlerName] = handler
self.functions.append(function)
self.structuralNavigation.functions.append(function)
self.keyBindings.add(keyBinding)
self.structuralNavigation.keyBindings.add(keyBinding)
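    # Hypothetical usage of the above (the method is unused at present); the
    # key, handler name and callable here are invented for illustration:
    #
    #   navObject.addHandlerAndBinding(
    #       ["m", settings.NO_MODIFIER_MASK, "Goes to next bookmark."],
    #       "customGoNextBookmarkHandler",
    #       someCallable)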
def goPrevious(self, script, inputEvent):
"""Go to the previous object."""
self.structuralNavigation.goObject(self, False)
def goNext(self, script, inputEvent):
"""Go to the next object."""
self.structuralNavigation.goObject(self, True)
def goPreviousAtLevelFactory(self, level):
"""Generates a goPrevious method for the specified level. Right
now, this is just for headings, but it may have applicability
for other objects such as list items (i.e. for level-based
        navigation in an outline or other multi-tiered list).
Arguments:
- level: the desired level of the object as an int.
"""
def goPreviousAtLevel(script, inputEvent):
self.structuralNavigation.goObject(self, False, arg=level)
return goPreviousAtLevel
def goNextAtLevelFactory(self, level):
"""Generates a goNext method for the specified level. Right
now, this is just for headings, but it may have applicability
for other objects such as list items (i.e. for level-based
        navigation in an outline or other multi-tiered list).
Arguments:
- level: the desired level of the object as an int.
"""
def goNextAtLevel(script, inputEvent):
self.structuralNavigation.goObject(self, True, arg=level)
return goNextAtLevel
def goDirectionFactory(self, direction):
"""Generates the methods for navigation in a particular direction
(i.e. left, right, up, down, first, last). Right now, this is
primarily for table cells, but it may have applicability for other
objects. For example, when navigating in an outline, one might
want the ability to navigate to the next item at a given level,
but then work his/her way up/down in the hierarchy.
Arguments:
- direction: the direction in which to navigate as a string.
"""
def goCell(script, inputEvent):
thisCell = self.structuralNavigation.getCellForObj(\
self.structuralNavigation.getCurrentObject())
currentCoordinates = \
self.structuralNavigation.getCellCoordinates(thisCell)
if direction == "Left":
desiredCoordinates = [currentCoordinates[0],
currentCoordinates[1] - 1]
elif direction == "Right":
desiredCoordinates = [currentCoordinates[0],
currentCoordinates[1] + 1]
elif direction == "Up":
desiredCoordinates = [currentCoordinates[0] - 1,
currentCoordinates[1]]
elif direction == "Down":
desiredCoordinates = [currentCoordinates[0] + 1,
currentCoordinates[1]]
elif direction == "First":
desiredCoordinates = [0, 0]
else:
desiredCoordinates = [-1, -1]
table = self.structuralNavigation.getTableForCell(thisCell)
if table:
iTable = table.queryTable()
lastRow = iTable.nRows - 1
lastCol = iTable.nColumns - 1
desiredCoordinates = [lastRow, lastCol]
self.structuralNavigation.goCell(self,
thisCell,
currentCoordinates,
desiredCoordinates)
def goLastLiveRegion(script, inputEvent):
"""Go to the last liveRegion."""
if settings.inferLiveRegions:
script.liveMngr.goLastLiveRegion()
else:
# Translators: this announces to the user that live region
# support has been turned off.
#
script.presentMessage(_("Live region support is off"))
if self.objType == StructuralNavigation.TABLE_CELL:
return goCell
elif self.objType == StructuralNavigation.LIVE_REGION \
and direction == "Last":
return goLastLiveRegion
#############################################################################
# #
# StructuralNavigation #
# #
#############################################################################
class StructuralNavigation:
"""This class implements the structural navigation functionality which
is available to scripts. Scripts interested in implementing structural
navigation need to override getEnabledStructuralNavigationTypes() and
return a list of StructuralNavigation object types which should be
enabled.
"""
# The available object types.
#
# Convenience methods have been put into place whereby one can
# create an object (FOO = "foo"), and then provide the following
# methods: _fooBindings(), _fooPredicate(), _fooCriteria(), and
# _fooPresentation(). With these in place, and with the object
# FOO included among the object types returned by the script's
# getEnabledStructuralNavigationTypes(), the StructuralNavigation
# object should be created and set up automagically. At least that
# is the idea. :-) This hopefully will also enable easy re-definition
# of existing StructuralNavigationObjects on a script-by-script basis.
# For instance, in the soffice script, overriding _blockquotePredicate
# should be all that is needed to implement navigation by blockquote
# in OOo Writer documents.
#
ANCHOR = "anchor"
BLOCKQUOTE = "blockquote"
BUTTON = "button"
CHECK_BOX = "checkBox"
CHUNK = "chunk"
COMBO_BOX = "comboBox"
ENTRY = "entry"
FORM_FIELD = "formField"
HEADING = "heading"
LANDMARK = "landmark"
LIST = "list" # Bulleted/numbered lists
LIST_ITEM = "listItem" # Bulleted/numbered list items
LIVE_REGION = "liveRegion"
PARAGRAPH = "paragraph"
RADIO_BUTTON = "radioButton"
SEPARATOR = "separator"
TABLE = "table"
TABLE_CELL = "tableCell"
UNVISITED_LINK = "unvisitedLink"
VISITED_LINK = "visitedLink"
# Whether or not to attempt to use collection. There's no point
# in bothering if we know that the collection interface has not
# been implemented in a given app (e.g. StarOffice/OOo) so this
# variable can be overridden.
#
collectionEnabled = settings.useCollection
# Roles which are recognized as being a form field. Note that this
# is for the purpose of match rules and predicates and refers to
# AT-SPI roles.
#
FORM_ROLES = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_COMBO_BOX,
pyatspi.ROLE_DOCUMENT_FRAME, # rich text editing
pyatspi.ROLE_LIST,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_SPIN_BUTTON,
pyatspi.ROLE_TEXT]
# Roles which are recognized as being potential "large objects"
# or "chunks." Note that this refers to AT-SPI roles.
#
OBJECT_ROLES = [pyatspi.ROLE_HEADING,
pyatspi.ROLE_LIST,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_TABLE,
pyatspi.ROLE_TABLE_CELL,
pyatspi.ROLE_TEXT,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_DOCUMENT_FRAME]
def __init__(self, script, enabledTypes, enabled=False):
"""Creates an instance of the StructuralNavigation class.
Arguments:
        - script: the script with which this instance is associated.
- enabledTypes: a list of StructuralNavigation object types
which the script is interested in supporting.
- enabled: Whether structural navigation should start out
          enabled. For instance, in Gecko we want it enabled by
          default; in soffice, we would want to start out with it
disabled and have the user enable it via a keystroke when
desired.
"""
self._script = script
self.enabled = enabled
# Create all of the StructuralNavigationObject's in which the
# script is interested, using the convenience method
#
self.enabledObjects = {}
for objType in enabledTypes:
self.enabledObjects[objType] = \
self.structuralNavigationObjectCreator(objType)
self.functions = []
self.inputEventHandlers = {}
self.setupInputEventHandlers()
self.keyBindings = self.getKeyBindings()
# When navigating in a non-uniform table, one can move to a
# cell which spans multiple rows and/or columns. When moving
# beyond that cell, into a cell that does NOT span multiple
# rows/columns, we want to be sure we land in the right place.
# Therefore, we'll store the coordinates from "our perspective."
#
self.lastTableCell = [-1, -1]
def structuralNavigationObjectCreator(self, name):
"""This convenience method creates a StructuralNavigationObject
        with the specified name and associated characteristics. (See the
        "Objects" section of code near the end of this class.) Creators
        of StructuralNavigationObject's can still do things the old-
        fashioned way should they so choose, by creating the instance
        and then adding it via addObject().
Arguments:
- name: the name/objType associated with this object.
"""
# We're going to assume bindings. After all, a structural
        # navigation object is by definition an object which one can
# navigate to using the associated keybindings. For similar
# reasons we'll also assume a predicate and a presentation
# method. (See the Objects section towards the end of this
# class for examples of each.)
#
bindings = eval("self._%sBindings()" % name)
predicate = eval("self._%sPredicate" % name)
presentation = eval("self._%sPresentation" % name)
# We won't make this assumption for match criteria because
# the collection interface might not be implemented (e.g.
# StarOffice/OpenOffice) and/or its use might not be possible
# or practical for a given StructuralNavigationObject (e.g.
# matching by text attributes, spatial navigation within tables).
#
try:
criteria = eval("self._%sCriteria" % name)
except:
criteria = None
return StructuralNavigationObject(self, name, bindings, predicate,
criteria, presentation)
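    # Concretely (illustration of the eval-based convention above): passing
    # the name "button" makes this method look up self._buttonBindings(),
    # self._buttonPredicate, self._buttonPresentation and, when defined,
    # self._buttonCriteria, all of which live in the Objects section near
    # the end of this class.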
def addObject(self, objType, structuralNavigationObject):
"""Adds structuralNavigationObject to the dictionary of enabled
objects.
Arguments:
- objType: the name/object type of the StructuralNavigationObject.
- structuralNavigationObject: the StructuralNavigationObject to
add.
"""
self.enabledObjects[objType] = structuralNavigationObject
def setupInputEventHandlers(self):
"""Defines InputEventHandler fields for a script."""
if not len(self.enabledObjects):
return
self.inputEventHandlers["toggleStructuralNavigationHandler"] = \
input_event.InputEventHandler(
self.toggleStructuralNavigation,
# Translators: the structural navigation keys are designed
# to move the caret around the document content by object
# type. Thus H moves you to the next heading, Shift H to
# the previous heading, T to the next table, and so on.
# This feature needs to be toggle-able so that it does not
# interfere with normal writing functions.
#
_("Toggles structural navigation keys."))
for structuralNavigationObject in self.enabledObjects.values():
self.inputEventHandlers.update(\
structuralNavigationObject.inputEventHandlers)
self.functions.extend(structuralNavigationObject.functions)
def getKeyBindings(self):
"""Defines the structural navigation key bindings for a script.
Returns: an instance of keybindings.KeyBindings.
"""
keyBindings = keybindings.KeyBindings()
if not len(self.enabledObjects):
return keyBindings
keyBindings.add(
keybindings.KeyBinding(
"z",
settings.defaultModifierMask,
settings.ORCA_MODIFIER_MASK,
self.inputEventHandlers["toggleStructuralNavigationHandler"]))
for structuralNavigationObject in self.enabledObjects.values():
bindings = structuralNavigationObject.keyBindings.keyBindings
for keybinding in bindings:
keyBindings.add(keybinding)
return keyBindings
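    # A sketch (hypothetical; the script-side attribute names are assumed)
    # of how a script might wire this class up:
    #
    #   types = [StructuralNavigation.HEADING, StructuralNavigation.TABLE]
    #   self.structuralNavigation = StructuralNavigation(self, types,
    #                                                     enabled=True)
    #   keyBindings = self.structuralNavigation.getKeyBindings()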
#########################################################################
# #
# Input Event Handler Methods #
# #
#########################################################################
def toggleStructuralNavigation(self, script, inputEvent):
"""Toggles structural navigation keys."""
self.enabled = not self.enabled
if self.enabled:
# Translators: the structural navigation keys are designed
# to move the caret around document content by object type.
# Thus H moves you to the next heading, Shift H to the
# previous heading, T to the next table, and so on. Some
# users prefer to turn this off to use Firefox's search
# when typing feature. This message is sent to both the
# braille display and the speech synthesizer when the user
# toggles the structural navigation feature of Orca.
# It should be a brief informative message.
#
string = _("Structural navigation keys on.")
else:
# Translators: the structural navigation keys are designed
# to move the caret around document content by object type.
# Thus H moves you to the next heading, Shift H to the
# previous heading, T to the next table, and so on. Some
# users prefer to turn this off to use Firefox's search
# when typing feature. This message is sent to both the
# braille display and the speech synthesizer when the user
# toggles the structural navigation feature of Orca.
# It should be a brief informative message.
#
string = _("Structural navigation keys off.")
debug.println(debug.LEVEL_CONFIGURATION, string)
self._script.presentMessage(string)
#########################################################################
# #
# Methods for Moving to Objects #
# #
#########################################################################
def goCell(self, structuralNavigationObject, thisCell,
currentCoordinates, desiredCoordinates):
"""The method used for navigation among cells in a table.
Arguments:
- structuralNavigationObject: the StructuralNavigationObject which
represents the table cell.
- thisCell: the pyatspi accessible TABLE_CELL we're currently in
- currentCoordinates: the [row, column] of thisCell. Note, we
cannot just get the coordinates because in table cells which
span multiple rows and/or columns, the value returned by
table.getRowAtIndex() is the first row the cell spans. Likewise,
the value returned by table.getColumnAtIndex() is the left-most
column. Therefore, we keep track of the row and column from
our perspective to ensure we stay in the correct row and column.
- desiredCoordinates: the [row, column] where we think we'd like to
be.
"""
table = self.getTableForCell(thisCell)
try:
iTable = table.queryTable()
except:
# Translators: this is for navigating document content by
# moving from table cell to table cell. If the user gives a
# table navigation command but is not in a table, Orca speaks
# this message.
#
self._script.presentMessage(_("Not in a table."))
return None
currentRow, currentCol = currentCoordinates
desiredRow, desiredCol = desiredCoordinates
rowDiff = desiredRow - currentRow
colDiff = desiredCol - currentCol
oldRowHeaders = self._getRowHeaders(thisCell)
oldColHeaders = self._getColumnHeaders(thisCell)
cell = thisCell
while cell:
cell = iTable.getAccessibleAt(desiredRow, desiredCol)
if not cell:
if desiredCol < 0:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the left of the current cell and is
# already in the first column.
#
self._script.presentMessage(_("Beginning of row."))
desiredCol = 0
elif desiredCol > iTable.nColumns - 1:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the right of the current cell and is
# already in the last column.
#
self._script.presentMessage(_("End of row."))
desiredCol = iTable.nColumns - 1
if desiredRow < 0:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the cell above the current cell and is
# already in the first row.
#
self._script.presentMessage(_("Top of column."))
desiredRow = 0
elif desiredRow > iTable.nRows - 1:
# Translators: this is for navigating document
# content by moving from table cell to table cell.
# This is the message spoken when the user attempts
# to move to the cell below the current cell and is
# already in the last row.
#
self._script.presentMessage(_("Bottom of column."))
desiredRow = iTable.nRows - 1
elif self._script.utilities.isSameObject(thisCell, cell) \
or settings.skipBlankCells and self._isBlankCell(cell):
if colDiff < 0:
desiredCol -= 1
elif colDiff > 0:
desiredCol += 1
if rowDiff < 0:
desiredRow -= 1
elif rowDiff > 0:
desiredRow += 1
else:
break
self.lastTableCell = [desiredRow, desiredCol]
if cell:
arg = [rowDiff, colDiff, oldRowHeaders, oldColHeaders]
structuralNavigationObject.present(cell, arg)
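    # Worked example (illustrative): moving Right from perspective
    # coordinates [2, 3] requests desiredCoordinates [2, 4].  If the cell
    # found there is the same spanned cell we started in, or is blank while
    # settings.skipBlankCells is set, the loop above keeps nudging the
    # desired column (5, 6, ...) until a genuinely new cell is found or
    # "End of row." is announced.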
def goObject(self, structuralNavigationObject, isNext, obj=None, arg=None):
"""The method used for navigation among StructuralNavigationObjects
which are not table cells.
Arguments:
- structuralNavigationObject: the StructuralNavigationObject which
represents the object of interest.
- isNext: If True, we're interested in the next accessible object
which matches structuralNavigationObject. If False, we're
interested in the previous accessible object which matches.
- obj: the current object (typically the locusOfFocus).
- arg: optional arguments which may need to be passed along to
the predicate, presentation method, etc. For instance, in the
case of navigating amongst headings at a given level, the level
is needed and passed in as arg.
"""
obj = obj or self.getCurrentObject()
# Yelp is seemingly fond of killing children for sport. Better
# check for that.
#
try:
state = obj.getState()
except:
return [None, False]
else:
if state.contains(pyatspi.STATE_DEFUNCT):
#print "goObject: defunct object", obj
debug.printException(debug.LEVEL_SEVERE)
return [None, False]
success = False
wrap = settings.wrappedStructuralNavigation
# Try to find it using Collection first. But don't do this with form
# fields for now. It's a bit faster moving to the next form field,
# but not on pages with huge forms (e.g. bugzilla's advanced search
# page). And due to bug #538680, we definitely don't want to use
# collection to go to the previous chunk or form field.
#
formObjects = [self.BUTTON, self.CHECK_BOX, self.COMBO_BOX,
self.ENTRY, self.FORM_FIELD, self.RADIO_BUTTON]
criteria = None
objType = structuralNavigationObject.objType
if self.collectionEnabled \
and not objType in formObjects \
and (isNext or objType != self.CHUNK):
try:
document = self._getDocument()
collection = document.queryCollection()
if structuralNavigationObject.criteria:
criteria = structuralNavigationObject.criteria(collection,
arg)
except:
debug.printException(debug.LEVEL_SEVERE)
else:
# If the document frame itself contains content and that is
# our current object, querying the collection interface will
# result in our starting at the top when looking for the next
# object rather than the current caret offset. See bug 567984.
#
if isNext \
and self._script.utilities.isSameObject(obj, document):
criteria = None
if criteria:
try:
rule = collection.createMatchRule(criteria.states.raw(),
criteria.matchStates,
criteria.objAttrs,
criteria.matchObjAttrs,
criteria.roles,
criteria.matchRoles,
criteria.interfaces,
criteria.matchInterfaces,
criteria.invert)
if criteria.applyPredicate:
predicate = structuralNavigationObject.predicate
else:
predicate = None
if not isNext:
[obj, wrapped] = self._findPrevByMatchRule(collection,
rule,
wrap,
obj,
predicate)
else:
[obj, wrapped] = self._findNextByMatchRule(collection,
rule,
wrap,
obj,
predicate)
success = True
collection.freeMatchRule(rule)
# print "collection", structuralNavigationObject.objType
except NotImplementedError:
debug.printException(debug.LEVEL_SEVERE)
except:
debug.printException(debug.LEVEL_SEVERE)
collection.freeMatchRule(rule)
# Do it iteratively when Collection failed or is disabled
#
if not success:
pred = structuralNavigationObject.predicate
if not isNext:
[obj, wrapped] = self._findPrevByPredicate(pred, wrap,
obj, arg)
else:
[obj, wrapped] = self._findNextByPredicate(pred, wrap,
obj, arg)
# print "predicate", structuralNavigationObject.objType
if wrapped:
if not isNext:
# Translators: when the user is attempting to locate a
# particular object and the top of a page or list is
# reached without that object being found, we "wrap" to
# the bottom and continue looking upwards. We need to
# inform the user when this is taking place.
#
self._script.presentMessage(_("Wrapping to bottom."))
else:
# Translators: when the user is attempting to locate a
# particular object and the bottom of a page or list is
# reached without that object being found, we "wrap" to the
# top and continue looking downwards. We need to inform the
# user when this is taking place.
#
self._script.presentMessage(_("Wrapping to top."))
structuralNavigationObject.present(obj, arg)
#########################################################################
# #
# Utility Methods for Finding Objects #
# #
#########################################################################
def getCurrentObject(self):
"""Returns the current object. Normally, the locusOfFocus. But
in the case of Gecko, that doesn't always work.
"""
return orca_state.locusOfFocus
def _findPrevByMatchRule(self, collection, matchRule, wrap, currentObj,
predicate=None):
"""Finds the previous object using the given match rule as a
pattern to match or not match.
Arguments:
-collection: the accessible collection interface
-matchRule: the collections match rule to use
-wrap: if True and the bottom of the document is reached, move
to the top and keep looking.
-currentObj: the object from which the search should begin
-predicate: an optional predicate to further test if the item
found via collection is indeed a match.
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
document = self._getDocument()
# If the current object is the document itself, find an actual
# object to use as the starting point. Otherwise we're in
# danger of skipping over the objects in between our present
# location and top of the document.
#
if self._script.utilities.isSameObject(currentObj, document):
currentObj = self._findNextObject(currentObj, document)
ancestors = []
obj = currentObj.parent
if obj.getRole() in [pyatspi.ROLE_LIST, pyatspi.ROLE_TABLE]:
ancestors.append(obj)
else:
while obj:
ancestors.append(obj)
obj = obj.parent
match, wrapped = None, False
results = collection.getMatchesTo(currentObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
True,
1,
True)
while not match:
if len(results) == 0:
if wrapped or not wrap:
break
elif wrap:
lastObj = self._findLastObject(document)
# Collection does not do an inclusive search, meaning
# that the start object is not part of the search. So
# we need to test the lastobj separately using the given
# matchRule. We don't have this problem for 'Next' because
# the startobj is the doc frame.
#
secondLastObj = self._findPreviousObject(lastObj, document)
results = collection.getMatchesFrom(\
secondLastObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
1,
True)
wrapped = True
if len(results) > 0 \
and (not predicate or predicate(results[0])):
match = results[0]
else:
results = collection.getMatchesTo(\
lastObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
True,
1,
True)
elif len(results) > 0:
if results[0] in ancestors \
or predicate and not predicate(results[0]):
results = collection.getMatchesTo(\
results[0],
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
True,
1,
True)
else:
match = results[0]
return [match, wrapped]
def _findNextByMatchRule(self, collection, matchRule, wrap, currentObj,
predicate=None):
"""Finds the next object using the given match rule as a pattern
to match or not match.
Arguments:
-collection: the accessible collection interface
-matchRule: the collections match rule to use
-wrap: if True and the bottom of the document is reached, move
to the top and keep looking.
-currentObj: the object from which the search should begin
-predicate: an optional predicate to further test if the item
found via collection is indeed a match.
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
ancestors = []
[currentObj, offset] = self._script.getCaretContext()
obj = currentObj.parent
while obj:
ancestors.append(obj)
obj = obj.parent
match, wrapped = None, False
while not match:
results = collection.getMatchesFrom(\
currentObj,
matchRule,
collection.SORT_ORDER_CANONICAL,
collection.TREE_INORDER,
1,
True)
if len(results) > 0 and not results[0] in ancestors:
currentObj = results[0]
if not predicate or predicate(currentObj):
match = currentObj
elif wrap and not wrapped:
wrapped = True
ancestors = [currentObj]
currentObj = self._getDocument()
else:
break
return [match, wrapped]
def _findPrevByPredicate(self, pred, wrap, currentObj=None, arg=None):
"""Finds the caret offset at the beginning of the previous object
using the given predicate as a pattern to match.
Arguments:
-pred: a python callable that takes an accessible argument and
returns true/false based on some match criteria
-wrap: if True and the top of the document is reached, move
to the bottom and keep looking.
-currentObj: the object from which the search should begin
-arg: an additional value to be passed to the predicate
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
document = self._getDocument()
# If the current object is the document itself, find an actual
# object to use as the starting point. Otherwise we're in
# danger of skipping over the objects in between our present
# location and top of the document.
#
if self._script.utilities.isSameObject(currentObj, document):
currentObj = self._findNextObject(currentObj, document)
ancestors = []
nestableRoles = [pyatspi.ROLE_LIST, pyatspi.ROLE_TABLE]
obj = currentObj.parent
while obj:
ancestors.append(obj)
obj = obj.parent
obj = self._findPreviousObject(currentObj, document)
wrapped = obj is None
match = None
if wrapped:
obj = self._findLastObject(document)
while obj and not match:
isNested = (obj != currentObj.parent \
and currentObj.parent.getRole() == obj.getRole() \
and obj.getRole() in nestableRoles)
if (not obj in ancestors or isNested) and pred(obj):
if wrapped \
and self._script.utilities.isSameObject(currentObj, obj):
break
else:
match = obj
else:
obj = self._findPreviousObject(obj, document)
if not obj and wrap and not wrapped:
obj = self._findLastObject(document)
wrapped = True
return [match, wrapped]
def _findNextByPredicate(self, pred, wrap, currentObj=None, arg=None):
"""Finds the caret offset at the beginning of the next object
using the given predicate as a pattern to match or not match.
Arguments:
-pred: a python callable that takes an accessible argument and
returns true/false based on some match criteria
-wrap: if True and the bottom of the document is reached, move
to the top and keep looking.
-currentObj: the object from which the search should begin
-arg: an additional value to be passed to the predicate
Returns: [obj, wrapped] where wrapped is a boolean reflecting
whether wrapping took place.
"""
currentObj = currentObj or self.getCurrentObject()
ancestors = []
obj = currentObj.parent
while obj:
ancestors.append(obj)
obj = obj.parent
document = self._getDocument()
obj = self._findNextObject(currentObj, document)
wrapped = obj is None
match = None
if wrapped:
[obj, offset] = self._getCaretPosition(document)
while obj and not match:
if (not obj in ancestors) and pred(obj, arg):
if wrapped \
and self._script.utilities.isSameObject(currentObj, obj):
break
else:
match = obj
else:
obj = self._findNextObject(obj, document)
if not obj and wrap and not wrapped:
[obj, offset] = self._getCaretPosition(document)
wrapped = True
return [match, wrapped]
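    # Shape of a predicate (illustrative; compare _anchorPredicate and
    # _buttonPredicate below): a callable taking the candidate accessible
    # plus an optional argument and returning a boolean, e.g.
    #
    #   def pred(obj, arg=None):
    #       return bool(obj) and obj.getRole() == pyatspi.ROLE_HEADING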
def _findPreviousObject(self, obj, stopAncestor):
"""Finds the object prior to this one, where the tree we're
dealing with is a DOM and 'prior' means the previous object
in a linear presentation sense.
Arguments:
-obj: the object where to start.
-stopAncestor: the ancestor at which the search should stop
"""
        # NOTE: This method is based on some initial experimentation
# with OOo structural navigation. It might need refining
# or fixing and is being overridden by the Gecko method
# regardless, so this one can be modified as appropriate.
#
prevObj = None
index = obj.getIndexInParent() - 1
if index >= 0:
prevObj = obj.parent[index]
if prevObj.childCount:
prevObj = prevObj[prevObj.childCount - 1]
elif not self._script.utilities.isSameObject(obj.parent, stopAncestor):
prevObj = obj.parent
return prevObj
def _findNextObject(self, obj, stopAncestor):
"""Finds the object after to this one, where the tree we're
dealing with is a DOM and 'next' means the next object
in a linear presentation sense.
Arguments:
-obj: the object where to start.
-stopAncestor: the ancestor at which the search should stop
"""
        # NOTE: This method is based on some initial experimentation
# with OOo structural navigation. It might need refining
# or fixing and is being overridden by the Gecko method
# regardless, so this one can be modified as appropriate.
#
nextObj = None
if obj and obj.childCount:
nextObj = obj[0]
while obj and obj.parent != obj and not nextObj:
index = obj.getIndexInParent() + 1
if 0 < index < obj.parent.childCount:
nextObj = obj.parent[index]
elif not self._script.utilities.isSameObject(
obj.parent, stopAncestor):
obj = obj.parent
else:
break
return nextObj
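    # Worked example (illustrative): for a subtree A(B(C, D), E) with A as
    # the stopAncestor, repeated calls to _findNextObject starting from A
    # yield B, C, D and then E, i.e. a depth-first, document-order walk
    # that stops rather than climb out of the stopAncestor.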
def _findLastObject(self, ancestor):
"""Returns the last object in ancestor.
Arguments:
- ancestor: the accessible object whose last (child) object
is sought.
"""
        # NOTE: This method is based on some initial experimentation
# with OOo structural navigation. It might need refining
# or fixing and is being overridden by the Gecko method
# regardless, so this one can be modified as appropriate.
#
if not ancestor or not ancestor.childCount:
return ancestor
lastChild = ancestor[ancestor.childCount - 1]
while lastChild:
lastObj = self._findNextObject(lastChild, ancestor)
if lastObj:
lastChild = lastObj
else:
break
return lastChild
def _getDocument(self):
"""Returns the document or other object in which the object of
interest is contained.
"""
docRoles = [pyatspi.ROLE_DOCUMENT_FRAME]
stopRoles = [pyatspi.ROLE_FRAME, pyatspi.ROLE_SCROLL_PANE]
document = self._script.utilities.ancestorWithRole(
orca_state.locusOfFocus, docRoles, stopRoles)
return document
def _isInDocument(self, obj):
"""Returns True if the accessible object obj is inside of
the document.
Arguments:
-obj: the accessible object of interest.
"""
document = self._getDocument()
while obj and obj.parent:
if self._script.utilities.isSameObject(obj.parent, document):
return True
else:
obj = obj.parent
return False
def _isUselessObject(self, obj):
"""Returns True if the accessible object obj is an object
that doesn't have any meaning associated with it. Individual
scripts should override this method as needed. Gecko does.
Arguments:
- obj: the accessible object of interest.
"""
return False
#########################################################################
# #
# Methods for Presenting Objects #
# #
#########################################################################
def _getTableCaption(self, obj):
"""Returns a string which contains the table caption, or
None if a caption could not be found.
Arguments:
- obj: the accessible table whose caption we want.
"""
caption = obj.queryTable().caption
try:
caption.queryText()
except:
return None
else:
return self._script.utilities.displayedText(caption)
def _getTableDescription(self, obj):
"""Returns a string which describes the table."""
nonUniformString = ""
nonUniform = self._isNonUniformTable(obj)
if nonUniform:
# Translators: a uniform table is one in which each table
# cell occupies one row and one column (i.e. a perfect grid)
# In contrast, a non-uniform table is one in which at least
# one table cell occupies more than one row and/or column.
#
nonUniformString = _("Non-uniform") + " "
table = obj.queryTable()
nRows = table.nRows
nColumns = table.nColumns
# Translators: this represents the number of rows in a table.
#
rowString = ngettext("table with %d row",
"table with %d rows",
nRows) % nRows
# Translators: this represents the number of columns in a table.
#
colString = ngettext("%d column",
"%d columns",
nColumns) % nColumns
return (nonUniformString + rowString + " " + colString)
def _isNonUniformTable(self, obj):
"""Returns True if the obj is a non-uniform table (i.e. a table
where at least one cell spans multiple rows and/or columns).
Arguments:
- obj: the table to examine
"""
try:
table = obj.queryTable()
except:
pass
else:
for i in xrange(obj.childCount):
[isCell, row, col, rowExtents, colExtents, isSelected] = \
table.getRowColumnExtentsAtIndex(i)
if (rowExtents > 1) or (colExtents > 1):
return True
return False
def getCellForObj(self, obj):
"""Looks for a table cell in the ancestry of obj, if obj is not a
table cell.
Arguments:
- obj: the accessible object of interest.
"""
cellRoles = [pyatspi.ROLE_TABLE_CELL, pyatspi.ROLE_COLUMN_HEADER]
if obj and not obj.getRole() in cellRoles:
document = self._getDocument()
obj = self._script.utilities.ancestorWithRole(
obj, cellRoles, [document.getRole()])
return obj
def getTableForCell(self, obj):
"""Looks for a table in the ancestry of obj, if obj is not a table.
Arguments:
- obj: the accessible object of interest.
"""
if obj and obj.getRole() != pyatspi.ROLE_TABLE:
document = self._getDocument()
obj = self._script.utilities.ancestorWithRole(
obj, [pyatspi.ROLE_TABLE], [document.getRole()])
return obj
def _isBlankCell(self, obj):
"""Returns True if the table cell is empty or consists of whitespace.
Arguments:
        - obj: the accessible table cell to examine
"""
if obj and obj.getRole() == pyatspi.ROLE_COLUMN_HEADER and obj.name:
return False
text = self._script.utilities.displayedText(obj)
if text and len(text.strip()) and text != obj.name:
return False
else:
for child in obj:
text = self._script.utilities.displayedText(child)
if text and len(text.strip()) \
or child.getRole() == pyatspi.ROLE_LINK:
return False
return True
def _getCellText(self, obj):
"""Looks at the table cell and tries to get its text.
Arguments:
        - obj: the accessible table cell to examine
"""
text = ""
if obj and not obj.childCount:
text = self._script.utilities.displayedText(obj)
else:
for child in obj:
childText = self._script.utilities.displayedText(child)
text = self._script.utilities.appendString(text, childText)
return text
def _presentCellHeaders(self, cell, oldCellInfo):
"""Speaks the headers of the accessible table cell, cell.
Arguments:
- cell: the accessible table cell whose headers we wish to
present.
- oldCellInfo: [rowDiff, colDiff, oldRowHeaders, oldColHeaders]
"""
if not cell or not oldCellInfo:
return
rowDiff, colDiff, oldRowHeaders, oldColHeaders = oldCellInfo
if not (oldRowHeaders or oldColHeaders):
return
# We only want to speak the header information that has
# changed, and we don't want to speak headers if we're in
# a header row/col.
#
if rowDiff and not self._isInHeaderRow(cell):
rowHeaders = self._getRowHeaders(cell)
for header in rowHeaders:
if not header in oldRowHeaders:
text = self._getCellText(header)
speech.speak(text)
if colDiff and not self._isInHeaderColumn(cell):
colHeaders = self._getColumnHeaders(cell)
for header in colHeaders:
if not header in oldColHeaders:
text = self._getCellText(header)
speech.speak(text)
def _getCellSpanInfo(self, obj):
"""Returns a string reflecting the number of rows and/or columns
spanned by a table cell when multiple rows and/or columns are
spanned.
Arguments:
- obj: the accessible table cell whose cell span we want.
"""
if not obj or (obj.getRole() != pyatspi.ROLE_TABLE_CELL):
return
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
return
[row, col] = self.getCellCoordinates(obj)
rowspan = table.getRowExtentAt(row, col)
colspan = table.getColumnExtentAt(row, col)
spanString = ""
if (colspan > 1) and (rowspan > 1):
# Translators: The cell here refers to a cell within a table
# within a document. We need to announce when the cell occupies
# or "spans" more than a single row and/or column.
#
spanString = ngettext("Cell spans %d row",
"Cell spans %d rows",
rowspan) % rowspan
# Translators: this represents the number of columns in a table.
#
spanString += ngettext(" %d column",
" %d columns",
colspan) % colspan
elif (colspan > 1):
# Translators: The cell here refers to a cell within a table
# within a document. We need to announce when the cell occupies
# or "spans" more than a single row and/or column.
#
spanString = ngettext("Cell spans %d column",
"Cell spans %d columns",
colspan) % colspan
elif (rowspan > 1):
# Translators: The cell here refers to a cell within a table
# within a document. We need to announce when the cell occupies
# or "spans" more than a single row and/or column.
#
spanString = ngettext("Cell spans %d row",
"Cell spans %d rows",
rowspan) % rowspan
return spanString
def getCellCoordinates(self, obj):
"""Returns the [row, col] of a ROLE_TABLE_CELL or [-1, -1]
if the coordinates cannot be found.
Arguments:
- obj: the accessible table cell whose coordinates we want.
"""
obj = self.getCellForObj(obj)
parent = self.getTableForCell(obj)
try:
table = parent.queryTable()
except:
pass
else:
# If we're in a cell that spans multiple rows and/or columns,
# thisRow and thisCol will refer to the upper left cell in
# the spanned range(s). We're storing the lastTableCell that
# we're aware of in order to facilitate more linear movement.
# Therefore, if the lastTableCell and this table cell are the
# same cell, we'll go with the stored coordinates.
#
lastRow, lastCol = self.lastTableCell
lastKnownCell = table.getAccessibleAt(lastRow, lastCol)
if self._script.utilities.isSameObject(lastKnownCell, obj):
return [lastRow, lastCol]
else:
index = self._script.utilities.cellIndex(obj)
thisRow = table.getRowAtIndex(index)
thisCol = table.getColumnAtIndex(index)
return [thisRow, thisCol]
return [-1, -1]
def _getRowHeaders(self, obj):
"""Returns a list of table cells that serve as a row header for
the specified TABLE_CELL.
Arguments:
- obj: the accessible table cell whose header(s) we want.
"""
rowHeaders = []
if not obj:
return rowHeaders
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
pass
else:
[row, col] = self.getCellCoordinates(obj)
# Theoretically, we should be able to quickly get the text
# of a {row, column}Header via get{Row,Column}Description().
# Gecko doesn't expose the information that way, however.
# get{Row,Column}Header seems to work sometimes.
#
header = table.getRowHeader(row)
if header:
rowHeaders.append(header)
# Headers that are strictly marked up with <th> do not seem
# to be exposed through get{Row, Column}Header.
#
else:
# If our cell spans multiple rows, we want to get all of
# the headers that apply.
#
rowspan = table.getRowExtentAt(row, col)
for r in range(row, row+rowspan):
# We could have multiple headers for a given row, one
# header per column. Presumably all of the headers are
# prior to our present location.
#
for c in range(0, col):
cell = table.getAccessibleAt(r, c)
if self._isHeader(cell) and not cell in rowHeaders:
rowHeaders.append(cell)
return rowHeaders
def _getColumnHeaders(self, obj):
"""Returns a list of table cells that serve as a column header for
the specified TABLE_CELL.
Arguments:
- obj: the accessible table cell whose header(s) we want.
"""
columnHeaders = []
if not obj:
return columnHeaders
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
pass
else:
[row, col] = self.getCellCoordinates(obj)
# Theoretically, we should be able to quickly get the text
# of a {row, column}Header via get{Row,Column}Description().
# Gecko doesn't expose the information that way, however.
# get{Row,Column}Header seems to work sometimes.
#
header = table.getColumnHeader(col)
if header:
columnHeaders.append(header)
# Headers that are strictly marked up with <th> do not seem
# to be exposed through get{Row, Column}Header.
#
else:
# If our cell spans multiple columns, we want to get all of
# the headers that apply.
#
colspan = table.getColumnExtentAt(row, col)
for c in range(col, col+colspan):
# We could have multiple headers for a given column, one
# header per row. Presumably all of the headers are
# prior to our present location.
#
for r in range(0, row):
cell = table.getAccessibleAt(r, c)
if self._isHeader(cell) and not cell in columnHeaders:
columnHeaders.append(cell)
return columnHeaders
def _isInHeaderRow(self, obj):
"""Returns True if all of the cells in the same row as this cell are
headers.
Arguments:
- obj: the accessible table cell whose row is to be examined.
"""
if obj and obj.getRole() == pyatspi.ROLE_TABLE_CELL:
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
return True
index = self._script.utilities.cellIndex(obj)
row = table.getRowAtIndex(index)
for col in xrange(table.nColumns):
cell = table.getAccessibleAt(row, col)
if not self._isHeader(cell):
return False
return True
def _isInHeaderColumn(self, obj):
"""Returns True if all of the cells in the same column as this cell
are headers.
Arguments:
- obj: the accessible table cell whose column is to be examined.
"""
if obj and obj.getRole() == pyatspi.ROLE_TABLE_CELL:
parentTable = self.getTableForCell(obj)
try:
table = parentTable.queryTable()
except:
return True
index = self._script.utilities.cellIndex(obj)
col = table.getColumnAtIndex(index)
for row in xrange(table.nRows):
cell = table.getAccessibleAt(row, col)
if not self._isHeader(cell):
return False
return True
def _isHeader(self, obj):
"""Returns True if the table cell is a header.
Arguments:
- obj: the accessible table cell to examine.
"""
if not obj:
return False
elif obj.getRole() in [pyatspi.ROLE_TABLE_COLUMN_HEADER,
pyatspi.ROLE_TABLE_ROW_HEADER,
pyatspi.ROLE_COLUMN_HEADER]:
return True
else:
attributes = obj.getAttributes()
if attributes:
for attribute in attributes:
if attribute == "tag:TH":
return True
return False
def _getHeadingLevel(self, obj):
"""Determines the heading level of the given object. A value
of 0 means there is no heading level.
Arguments:
- obj: the accessible whose heading level we want.
"""
level = 0
if obj is None:
return level
if obj.getRole() == pyatspi.ROLE_HEADING:
attributes = obj.getAttributes()
if attributes is None:
return level
for attribute in attributes:
if attribute.startswith("level:"):
level = int(attribute.split(":")[1])
break
return level
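    # Example (illustrative): a heading exposing the object attribute
    # "level:2" is reported as level 2 by the parsing above; an accessible
    # with no "level:" attribute, or one that is not a heading, reports 0.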
def _getCaretPosition(self, obj):
"""Returns the [obj, characterOffset] where the caret should be
positioned. For most scripts, the object should not change and
the offset should be 0. That's not always the case with Gecko.
Arguments:
- obj: the accessible object in which the caret should be
positioned.
"""
return [obj, 0]
def _setCaretPosition(self, obj, characterOffset):
"""Sets the caret at the specified offset within obj.
Arguments:
- obj: the accessible object in which the caret should be
positioned.
- characterOffset: the offset at which to position the caret.
"""
try:
text = obj.queryText()
text.setCaretOffset(characterOffset)
except NotImplementedError:
try:
obj.queryComponent().grabFocus()
except:
debug.printException(debug.LEVEL_SEVERE)
except:
debug.printException(debug.LEVEL_SEVERE)
orca.setLocusOfFocus(None, obj, notifyScript=False)
def _presentLine(self, obj, offset):
"""Presents the first line of the object to the user.
Arguments:
- obj: the accessible object to be presented.
- offset: the character offset within obj.
"""
self._script.updateBraille(obj)
self._script.sayLine(obj)
def _presentObject(self, obj, offset):
"""Presents the entire object to the user.
Arguments:
- obj: the accessible object to be presented.
- offset: the character offset within obj.
"""
self._script.updateBraille(obj)
# [[[TODO: WDW - move the voice selection to formatting.py
# at some point.]]]
#
voices = self._script.voices
if obj.getRole() == pyatspi.ROLE_LINK:
voice = voices[settings.HYPERLINK_VOICE]
else:
voice = voices[settings.DEFAULT_VOICE]
utterances = self._script.speechGenerator.generateSpeech(obj)
speech.speak(utterances, voice)
#########################################################################
# #
# Objects #
# #
#########################################################################
# All structural navigation objects have the following essential
# characteristics:
#
# 1. Keybindings for goPrevious, goNext, and other such methods
# 2. A means of identification (at least a predicate and possibly
# also criteria for generating a collection match rule)
# 3. A definition of how the object should be presented (both
# when another instance of that object is found as well as
# when it is not)
#
# Convenience methods have been put into place whereby one can
# create an object (FOO = "foo"), and then provide the following
# methods: _fooBindings(), _fooPredicate(), _fooCriteria(), and
# _fooPresentation(). With these in place, and with the object
# FOO included among the StructuralNavigation.enabledTypes for
# the script, the structural navigation object should be created
# and set up automagically. At least that is the idea. :-) This
# hopefully will also enable easy re-definition of existing
# objects on a script-by-script basis. For instance, in the
# StarOffice script, overriding the _blockquotePredicate should
# be all that is needed to implement navigation by blockquote
# in OOo Writer documents.
#
########################
# #
# Anchors #
# #
########################
def _anchorBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst anchors.
"""
# NOTE: This doesn't handle the case where the anchor is not an
# old-school <a name/id="foo"></a> anchor. For instance on the
# GNOME wiki, an "anchor" is actually an id applied to some other
        # tag (e.g. <h2 id="foo">My Heading</h2>). We'll have to be a
# bit more clever for those. With the old-school anchors, this
# seems to work nicely and provides the user with a way to jump
# among defined areas without having to find a Table of Contents
# group of links (assuming such a thing is even present on the
# page).
bindings = {}
# Translators: this is for navigating among anchors in a document.
# An anchor is a named spot that one can jump to.
#
prevDesc = _("Goes to previous anchor.")
bindings["previous"] = ["a", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among anchors in a document.
# An anchor is a named spot that one can jump to.
#
nextDesc = _("Goes to next anchor.")
bindings["next"] = ["a", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _anchorCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating anchors
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LINK]
state = [pyatspi.STATE_FOCUSABLE]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _anchorPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an anchor.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
state = obj.getState()
isMatch = not state.contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _anchorPresentation(self, obj, arg=None):
"""Presents the anchor or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
# Translators: this is for navigating document content by
# moving from anchor to anchor. (An anchor is a named spot
# that one can jump to.) This is a detailed message which
# will be presented to the user if no more anchors can be found.
#
full = _("No more anchors.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Blockquotes #
# #
########################
def _blockquoteBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating among blockquotes.
"""
bindings = {}
# Translators: this is for navigating among blockquotes in a
# document.
#
prevDesc = _("Goes to previous blockquote.")
bindings["previous"] = ["q", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among blockquotes in a
# document.
#
nextDesc = _("Goes to next blockquote.")
bindings["next"] = ["q", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _blockquoteCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating blockquotes
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
attrs = ['tag:BLOCKQUOTE']
return MatchCriteria(collection, objAttrs=attrs)
def _blockquotePredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a blockquote.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if not obj:
return False
attributes = obj.getAttributes()
if attributes:
for attribute in attributes:
if attribute == "tag:BLOCKQUOTE":
return True
return False
def _blockquotePresentation(self, obj, arg=None):
"""Presents the blockquote or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
# TODO: We currently present the line, so that's kept here.
# But we should probably present the object, which would
# be consistent with the change made recently for headings.
#
self._presentLine(obj, characterOffset)
else:
# Translators: this is for navigating document content by
# moving from blockquote to blockquote. This is a detailed
# message which will be presented to the user if no more
# blockquotes can be found.
#
full = _("No more blockquotes.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Buttons #
# #
########################
def _buttonBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst buttons.
"""
bindings = {}
# Translators: this is for navigating among buttons in a form
# within a document.
#
prevDesc = _("Goes to previous button.")
bindings["previous"] = ["b", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among buttons in a form
# within a document.
#
nextDesc = _("Goes to next button.")
bindings["next"] = ["b", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _buttonCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating buttons
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_PUSH_BUTTON]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _buttonPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a button.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_PUSH_BUTTON:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _buttonPresentation(self, obj, arg=None):
"""Presents the button or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from push button to push button in a form. This is
# a detailed message which will be presented to the user if
# no more push buttons can be found.
#
full = _("No more buttons.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Check boxes #
# #
########################
def _checkBoxBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst check boxes.
"""
bindings = {}
# Translators: this is for navigating among check boxes in a form
# within a document.
#
prevDesc = _("Goes to previous check box.")
bindings["previous"] = ["x", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among check boxes in a form
# within a document.
#
nextDesc = _("Goes to next check box.")
bindings["next"] = ["x", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _checkBoxCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating check boxes
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_CHECK_BOX]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _checkBoxPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a check box.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_CHECK_BOX:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _checkBoxPresentation(self, obj, arg=None):
"""Presents the check box or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from checkbox to checkbox in a form. This is a
# detailed message which will be presented to the user if
# no more checkboxes can be found.
#
full = _("No more check boxes.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Chunks/Large Objects #
# #
########################
def _chunkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst chunks/large objects.
"""
bindings = {}
# Translators: this is for navigating a document in a
# structural manner, where a 'large object' is a logical
# chunk of text, such as a paragraph, a list, a table, etc.
#
prevDesc = _("Goes to previous large object.")
bindings["previous"] = ["o", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating a document in a
# structural manner, where a 'large object' is a logical
# chunk of text, such as a paragraph, a list, a table, etc.
#
nextDesc = _("Goes to next large object.")
bindings["next"] = ["o", settings.NO_MODIFIER_MASK, nextDesc]
# I don't think it makes sense to add support for a list
# of chunks. But one could always change that here.
#
return bindings
def _chunkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating chunks/
large objects by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = self.OBJECT_ROLES
roleMatch = collection.MATCH_ANY
return MatchCriteria(collection,
roles=role,
matchRoles=roleMatch,
applyPredicate=True)
def _chunkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a chunk.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() in self.OBJECT_ROLES:
try:
text = obj.queryText()
characterCount = text.characterCount
except:
characterCount = 0
if characterCount > settings.largeObjectTextLength \
and not self._isUselessObject(obj):
isMatch = True
return isMatch
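    # In short, a "chunk"/large object is any element with one of the
    # OBJECT_ROLES whose text is longer than settings.largeObjectTextLength;
    # objects without a usable text interface fall back to a character count
    # of 0 above, and _isUselessObject() filters out the rest.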
def _chunkPresentation(self, obj, arg=None):
"""Presents the chunk or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[newObj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(newObj, characterOffset)
self._presentObject(obj, 0)
else:
# Translators: this is for navigating document content by
# moving from 'large object' to 'large object'. A 'large
# object' is a logical chunk of text, such as a paragraph,
# a list, a table, etc. This is a detailed message which
# will be presented to the user if no more large objects
# can be found.
#
full = _("No more large objects.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Combo Boxes #
# #
########################
def _comboBoxBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst combo boxes.
"""
bindings = {}
# Translators: this is for navigating among combo boxes in a form
# within a document.
#
prevDesc = _("Goes to previous combo box.")
bindings["previous"] = ["c", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among combo boxes in a form
# within a document.
#
nextDesc = _("Goes to next combo box.")
bindings["next"] = ["c", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _comboBoxCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating combo boxes
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_COMBO_BOX]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _comboBoxPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a combo box.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_COMBO_BOX:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _comboBoxPresentation(self, obj, arg=None):
"""Presents the combo box or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from combo box to combo box in a form. This is a
# detailed message which will be presented to the user if
            # no more combo boxes can be found.
#
full = _("No more combo boxes.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Entries #
# #
########################
def _entryBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst entries.
"""
bindings = {}
# Translators: this is for navigating among text entries in a form
# within a document.
#
prevDesc = _("Goes to previous entry.")
bindings["previous"] = ["e", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among text entries
# in a form.
#
nextDesc = _("Goes to next entry.")
bindings["next"] = ["e", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _entryCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating entries
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_TEXT]
roleMatch = collection.MATCH_ANY
state = [pyatspi.STATE_FOCUSABLE,
pyatspi.STATE_SENSITIVE,
pyatspi.STATE_EDITABLE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role,
matchRoles=roleMatch,
applyPredicate=True)
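    # Reading of the criteria above (based on the collection constants used):
    # MATCH_ANY on roles means "any of the listed roles", MATCH_ALL on states
    # means "all of the listed states", and applyPredicate=True additionally
    # filters the candidates through _entryPredicate below.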
def _entryPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an entry.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() in [pyatspi.ROLE_DOCUMENT_FRAME,
pyatspi.ROLE_ENTRY,
pyatspi.ROLE_PASSWORD_TEXT,
pyatspi.ROLE_TEXT]:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE) \
and state.contains(pyatspi.STATE_EDITABLE)
return isMatch
def _entryPresentation(self, obj, arg=None):
"""Presents the entry or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by
# moving from text entry to text entry in a form. This is
# a detailed message which will be presented to the user if
# no more text entries can be found.
#
full = _("No more entries.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Form Fields #
# #
########################
def _formFieldBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst form fields.
"""
bindings = {}
# Translators: this is for navigating among fields in a form within
# a document.
#
prevDesc = _("Goes to previous form field.")
bindings["previous"] = ["Tab",
settings.ORCA_SHIFT_MODIFIER_MASK,
prevDesc]
# Translators: this is for navigating among fields in a form within
# a document.
#
nextDesc = _("Goes to next form field.")
bindings["next"] = ["Tab", settings.ORCA_MODIFIER_MASK, nextDesc]
return bindings
def _formFieldCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating form fields
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = self.FORM_ROLES
roleMatch = collection.MATCH_ANY
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role,
matchRoles=roleMatch,
applyPredicate=True)
def _formFieldPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a form field.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() in self.FORM_ROLES:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _formFieldPresentation(self, obj, arg=None):
"""Presents the form field or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
if obj.getRole() in [pyatspi.ROLE_LIST, pyatspi.ROLE_COMBO_BOX]:
obj.queryComponent().grabFocus()
else:
# TODO: I think we should just grab focus on the object
# regardless of the object type. But that's not what we
# do now, and it causes an extra newline character to show
# up in the regression test output for entries, so for the
# purpose of passing the regression tests, I'm not making
# that change yet.
#
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
# Translators: this is for navigating document content by
            # moving from form field to form field. This is a detailed
# message which will be presented to the user if no more form
# field can be found.
#
full = _("No more form fields.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Headings #
# #
########################
def _headingBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst headings.
"""
bindings = {}
# Translators: this is for navigating in a document by heading.
# (e.g. <h1>)
#
prevDesc = _("Goes to previous heading.")
bindings["previous"] = ["h", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating in a document by heading.
# (e.g., <h1>)
#
nextDesc = _("Goes to next heading.")
bindings["next"] = ["h", settings.NO_MODIFIER_MASK, nextDesc]
prevAtLevelBindings = []
nextAtLevelBindings = []
minLevel, maxLevel = self._headingLevels()
for i in range(minLevel, maxLevel + 1):
# Translators: this is for navigating in a document by heading.
# (e.g. <h1> is a heading at level 1).
#
prevDesc = _("Goes to previous heading at level %d.") % i
prevAtLevelBindings.append([str(i),
settings.SHIFT_MODIFIER_MASK,
prevDesc])
# Translators: this is for navigating in a document by heading.
# (e.g. <h1> is a heading at level 1).
#
nextDesc = _("Goes to next heading at level %d.") % i
nextAtLevelBindings.append([str(i),
settings.NO_MODIFIER_MASK,
nextDesc])
bindings["previousAtLevel"] = prevAtLevelBindings
bindings["nextAtLevel"] = nextAtLevelBindings
return bindings
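    # For illustration only: with _headingLevels() returning [1, 6] below,
    # the two *AtLevel lists each hold six entries, one per heading level,
    # e.g. bindings["previousAtLevel"][0] ==
    #   ["1", settings.SHIFT_MODIFIER_MASK,
    #    "Goes to previous heading at level 1."]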
def _headingLevels(self):
"""Returns the [minimum heading level, maximum heading level]
which should be navigable via structural navigation.
"""
return [1, 6]
def _headingCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating headings
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_HEADING]
attrs = []
if arg:
attrs.append('level:%d' % arg)
return MatchCriteria(collection,
roles=role,
objAttrs=attrs)
def _headingPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a heading.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_HEADING:
if arg:
isMatch = (arg == self._getHeadingLevel(obj))
else:
isMatch = True
return isMatch
def _headingPresentation(self, obj, arg=None):
"""Presents the heading or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
elif not arg:
# Translators: this is for navigating HTML content by moving from
# heading to heading (e.g. <h1>, <h2>, etc). This string is the
# detailed message which Orca will present if there are no more
# headings found.
#
full = _("No more headings.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
else:
# Translators: this is for navigating HTML content by moving from
# heading to heading at a particular level (i.e. only <h1> or only
# <h2>, etc.) This string is the detailed message which Orca will
# present if there are no more headings found at the desired level.
#
full = _("No more headings at level %d.") % arg
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Landmarks #
# #
########################
def _landmarkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst landmarks.
"""
bindings = {}
# Translators: this is for navigating to the previous ARIA
# role landmark. ARIA role landmarks are the W3C defined
# HTML tag attribute 'role' used to identify important part
# of webpage like banners, main context, search etc.
#
prevDesc = _("Goes to previous landmark.")
bindings["previous"] = ["m", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating to the next ARIA
# role landmark. ARIA role landmarks are the W3C defined
# HTML tag attribute 'role' used to identify important part
# of webpage like banners, main context, search etc.
#
nextDesc = _("Goes to next landmark.")
bindings["next"] = ["m", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _landmarkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating landmarks
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
# NOTE: there is a limitation in the AT-SPI Collections interface
# when it comes to an attribute whose value can be a list. For
        # example, the xml-roles attribute can be a space-separated list
# of roles. We'd like to make a match if the xml-roles attribute
# has one (or any) of the roles we care about. Instead, we're
# restricted to an exact match. So, the below will only work in
# the cases where the xml-roles attribute value consists solely of a
# single role. In practice, this seems to be the case that we run
# into for the landmark roles.
#
attrs = []
for landmark in settings.ariaLandmarks:
attrs.append('xml-roles:' + landmark)
return MatchCriteria(collection, objAttrs=attrs)
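    # For illustration only (hypothetical settings): if settings.ariaLandmarks
    # contained ['banner', 'main', 'search'], the criteria above would carry
    # attrs == ['xml-roles:banner', 'xml-roles:main', 'xml-roles:search'].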
def _landmarkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a landmark.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj is None:
return False
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
try:
            # split() since xml-roles may hold several space-separated roles
            if set(attrs['xml-roles'].split()).intersection(\
set(settings.ariaLandmarks)):
return True
else:
return False
except KeyError:
return False
def _landmarkPresentation(self, obj, arg=None):
"""Presents the landmark or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
else:
# Translators: this is for navigating to the previous ARIA
# role landmark. ARIA role landmarks are the W3C defined
# HTML tag attribute 'role' used to identify important part
# of webpage like banners, main context, search etc. This
# is an indication that one was not found.
#
full = _("No landmark found.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Lists #
# #
########################
def _listBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst (un)ordered lists.
"""
bindings = {}
# Translators: this is for navigating among bulleted/numbered
# lists in a document.
#
prevDesc = _("Goes to previous list.")
bindings["previous"] = ["l", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among bulleted/numbered
# lists in a document.
#
nextDesc = _("Goes to next list.")
bindings["next"] = ["l", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _listCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating (un)ordered
lists by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LIST]
state = [pyatspi.STATE_FOCUSABLE]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _listPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an (un)ordered list.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LIST:
isMatch = not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _listPresentation(self, obj, arg=None):
"""Presents the (un)ordered list or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
# TODO: Ultimately it should be the job of the speech (and braille)
# generator to present things like this.
#
if obj:
nItems = 0
for child in obj:
if child.getRole() == pyatspi.ROLE_LIST_ITEM:
nItems += 1
# Translators: this represents a list in HTML.
#
itemString = ngettext("List with %d item",
"List with %d items",
nItems) % nItems
self._script.presentMessage(itemString)
nestingLevel = 0
parent = obj.parent
while parent.getRole() == pyatspi.ROLE_LIST:
nestingLevel += 1
parent = parent.parent
if nestingLevel:
# Translators: this represents a list item in a document.
# The nesting level is how 'deep' the item is (e.g., a
# level of 2 represents a list item inside a list that's
# inside another list).
#
self._script.presentMessage(_("Nesting level %d") % \
nestingLevel)
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentLine(obj, characterOffset)
else:
# Translators: this is for navigating document content by moving
# from bulleted/numbered list to bulleted/numbered list. This
# string is the detailed message which Orca will present if there
# are no more lists found.
#
full = _("No more lists.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
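    # For illustration only: a five-item list nested inside another list
    # would be announced as "List with 5 items", then "Nesting level 1",
    # followed by the line at the new caret position.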
########################
# #
# List Items #
# #
########################
def _listItemBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst items in an (un)ordered list.
"""
bindings = {}
# Translators: this is for navigating among bulleted/numbered list
# items in a document.
#
prevDesc = _("Goes to previous list item.")
bindings["previous"] = ["i", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among bulleted/numbered list
# items in a document.
#
nextDesc = _("Goes to next list item.")
bindings["next"] = ["i", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _listItemCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating items in an
(un)ordered list by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LIST_ITEM]
state = [pyatspi.STATE_FOCUSABLE]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _listItemPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an item in an (un)ordered list.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LIST_ITEM:
isMatch = not obj.getState().contains(pyatspi.STATE_FOCUSABLE)
return isMatch
def _listItemPresentation(self, obj, arg=None):
"""Presents the (un)ordered list item or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
# TODO: We currently present the line, so that's kept here.
# But we should probably present the object, which would
# be consistent with the change made recently for headings.
#
self._presentLine(obj, characterOffset)
else:
# Translators: this is for navigating document content by
# moving from bulleted/numbered list item to bulleted/
# numbered list item. This string is the detailed message
# which Orca will present if there are no more list items found.
#
full = _("No more list items.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Live Regions #
# #
########################
def _liveRegionBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst live regions.
"""
bindings = {}
# Translators: this is for navigating between live regions
#
prevDesc = _("Goes to previous live region.")
bindings["previous"] = ["d", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating between live regions
#
nextDesc = _("Goes to next live region.")
bindings["next"] = ["d", settings.NO_MODIFIER_MASK, nextDesc]
# Translators: this is for navigating to the last live region
# which made an announcement.
#
desc = _("Goes to the last live region which made an announcement.")
bindings["last"] = ["y", settings.NO_MODIFIER_MASK, desc]
return bindings
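    # Note: live regions are matched through the script's live region manager
    # (see the predicate below) rather than through collection criteria.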
def _liveRegionPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a live region.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
regobjs = self._script.liveMngr.getLiveNoneObjects()
if self._script.liveMngr.matchLiveRegion(obj) or obj in regobjs:
isMatch = True
return isMatch
def _liveRegionPresentation(self, obj, arg=None):
"""Presents the live region or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
# TODO: We don't want to move to a list item.
# Is this the best place to handle this?
#
if obj.getRole() == pyatspi.ROLE_LIST:
characterOffset = 0
else:
[obj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(obj, characterOffset)
self._presentObject(obj, characterOffset)
# For debugging
#
self._script.outlineAccessible(obj)
else:
# Translators: this is for navigating HTML in a structural
# manner, where a 'live region' is a location in a web page
            # that is updated without having to refresh the entire page.
#
full = _("No more live regions.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Paragraphs #
# #
########################
def _paragraphBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst paragraphs.
"""
bindings = {}
# Translators: this is for navigating among paragraphs in a document.
#
prevDesc = _("Goes to previous paragraph.")
bindings["previous"] = ["p", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among paragraphs in a document.
#
nextDesc = _("Goes to next paragraph.")
bindings["next"] = ["p", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _paragraphCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating paragraphs
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_PARAGRAPH]
return MatchCriteria(collection, roles=role, applyPredicate=True)
def _paragraphPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a paragraph.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_PARAGRAPH:
try:
text = obj.queryText()
# We're choosing 3 characters as the minimum because some
# paragraphs contain a single image or link and a text
# of length 2: An embedded object character and a space.
# We want to skip these.
#
isMatch = text.characterCount > 2
except:
pass
return isMatch
def _paragraphPresentation(self, obj, arg=None):
"""Presents the paragraph or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[newObj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(newObj, characterOffset)
self._presentObject(obj, 0)
else:
# Translators: this is for navigating document content by moving
# from paragraph to paragraph. This string is the detailed message
# which Orca will present if there are no more paragraphs found.
#
full = _("No more paragraphs.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Radio Buttons #
# #
########################
def _radioButtonBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst radio buttons.
"""
bindings = {}
# Translators: this is for navigating among radio buttons in a
# form within a document.
#
prevDesc = _("Goes to previous radio button.")
bindings["previous"] = ["r", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among radio buttons in a
# form within a document.
#
nextDesc = _("Goes to next radio button.")
bindings["next"] = ["r", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _radioButtonCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating radio buttons
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_RADIO_BUTTON]
state = [pyatspi.STATE_FOCUSABLE, pyatspi.STATE_SENSITIVE]
stateMatch = collection.MATCH_ALL
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _radioButtonPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a radio button.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_RADIO_BUTTON:
state = obj.getState()
isMatch = state.contains(pyatspi.STATE_FOCUSABLE) \
and state.contains(pyatspi.STATE_SENSITIVE)
return isMatch
def _radioButtonPresentation(self, obj, arg=None):
"""Presents the radio button or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating in document content by moving
# from radio button to radio button in a form. This string is the
# detailed message which Orca will present if there are no more
# radio buttons found.
#
full = _("No more radio buttons.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Separators #
# #
########################
def _separatorBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst separators.
"""
bindings = {}
# Translators: this is for navigating among separators, such as the
# <hr> tag, in a document.
#
prevDesc = _("Goes to previous separator.")
bindings["previous"] = ["s", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among separators, such as the
# <hr> tag, in a document.
#
nextDesc = _("Goes to next separator.")
bindings["next"] = ["s", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _separatorCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating separators
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_SEPARATOR]
return MatchCriteria(collection, roles=role, applyPredicate=False)
def _separatorPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a separator.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
return obj and obj.getRole() == pyatspi.ROLE_SEPARATOR
def _separatorPresentation(self, obj, arg=None):
"""Presents the separator or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
[newObj, characterOffset] = self._getCaretPosition(obj)
self._setCaretPosition(newObj, characterOffset)
self._presentObject(obj, 0)
else:
# Translators: this is for navigating document content by moving
# amongst separators (e.g. <hr> tags). This string is the detailed
# message which Orca will present if there are no more separators
# found.
#
full = _("No more separators.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Tables #
# #
########################
def _tableBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst tables.
"""
bindings = {}
# Translators: this is for navigating among tables in a document.
#
prevDesc = _("Goes to previous table.")
bindings["previous"] = ["t", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among tables in a document.
#
nextDesc = _("Goes to next table.")
bindings["next"] = ["t", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _tableCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating tables
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_TABLE]
return MatchCriteria(collection, roles=role, applyPredicate=True)
def _tablePredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a table.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj and obj.childCount and obj.getRole() == pyatspi.ROLE_TABLE:
try:
return obj.queryTable().nRows > 0
except:
pass
return False
def _tablePresentation(self, obj, arg=None):
"""Presents the table or indicates that one was not found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
caption = self._getTableCaption(obj)
if caption:
self._script.presentMessage(caption)
self._script.presentMessage(self._getTableDescription(obj))
cell = obj.queryTable().getAccessibleAt(0, 0)
self.lastTableCell = [0, 0]
[cell, characterOffset] = self._getCaretPosition(cell)
self._setCaretPosition(cell, characterOffset)
self._presentObject(cell, characterOffset)
else:
# Translators: this is for navigating document content by moving
# from table to table. This string is the detailed message which
# Orca will present if there are no more tables found.
#
full = _("No more tables.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Table Cells #
# #
########################
def _tableCellBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating spatially amongst table cells.
"""
bindings = {}
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes left one cell.")
bindings["left"] = ["Left", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes right one cell.")
bindings["right"] = ["Right", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes up one cell.")
bindings["up"] = ["Up", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes down one cell.")
bindings["down"] = ["Down", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes to the first cell in a table.")
bindings["first"] = ["Home", settings.SHIFT_ALT_MODIFIER_MASK, desc]
# Translators: this is for navigating among table cells in a document.
#
desc = _("Goes to the last cell in a table.")
bindings["last"] = ["End", settings.SHIFT_ALT_MODIFIER_MASK, desc]
return bindings
def _tableCellCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating table cells
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_TABLE_CELL, pyatspi.ROLE_COLUMN_HEADER]
return MatchCriteria(collection, roles=role)
def _tableCellPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a table cell.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
return (obj and obj.getRole() in [pyatspi.ROLE_COLUMN_HEADER,
pyatspi.ROLE_TABLE_CELL])
    def _tableCellPresentation(self, cell, arg=None):
"""Presents the table cell or indicates that one was not found.
Arguments:
        - cell: the accessible table cell under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if not cell:
return
if settings.speakCellHeaders:
self._presentCellHeaders(cell, arg)
[obj, characterOffset] = self._getCaretPosition(cell)
self._setCaretPosition(obj, characterOffset)
self._script.updateBraille(obj)
blank = self._isBlankCell(cell)
if not blank:
self._presentObject(cell, 0)
else:
# Translators: "blank" is a short word to mean the
# user has navigated to an empty line.
#
speech.speak(_("blank"))
if settings.speakCellCoordinates:
[row, col] = self.getCellCoordinates(cell)
# Translators: this represents the (row, col) position of
# a cell in a table.
#
self._script.presentMessage(_("Row %(row)d, column %(column)d.") \
% {"row" : row + 1, "column" : col + 1})
spanString = self._getCellSpanInfo(cell)
if spanString and settings.speakCellSpan:
self._script.presentMessage(spanString)
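    # For illustration only: a non-blank cell at zero-based coordinates
    # (1, 2) is announced as "Row 2, column 3." when speakCellCoordinates
    # is enabled, since the indices above are presented one-based.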
########################
# #
# Unvisited Links #
# #
########################
def _unvisitedLinkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst unvisited links.
"""
bindings = {}
# Translators: this is for navigating among unvisited links in a
# document.
#
prevDesc = _("Goes to previous unvisited link.")
bindings["previous"] = ["u", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among unvisited links in a
# document.
#
nextDesc = _("Goes to next unvisited link.")
bindings["next"] = ["u", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _unvisitedLinkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating unvisited links
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LINK]
state = [pyatspi.STATE_VISITED]
stateMatch = collection.MATCH_NONE
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _unvisitedLinkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is an unvisited link.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
isMatch = not obj.getState().contains(pyatspi.STATE_VISITED)
return isMatch
def _unvisitedLinkPresentation(self, obj, arg=None):
"""Presents the unvisited link or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
# We were counting on the Gecko script's setCaretPosition
# to do the focus grab. It turns out that we do not always
# want setCaretPosition to grab focus on a link (e.g. when
# arrowing in the text of a paragraph which is a child of
# a link. Therefore, we need to grab focus here.
#
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by moving
# from unvisited link to unvisited link. This string is the
# detailed message which Orca will present if there are no more
# unvisited links found.
#
full = _("No more unvisited links.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
########################
# #
# Visited Links #
# #
########################
def _visitedLinkBindings(self):
"""Returns a dictionary of [keysymstring, modifiers, description]
lists for navigating amongst visited links.
"""
bindings = {}
# Translators: this is for navigating among visited links in a
# document.
#
prevDesc = _("Goes to previous visited link.")
bindings["previous"] = ["v", settings.SHIFT_MODIFIER_MASK, prevDesc]
# Translators: this is for navigating among visited links in a
# document.
#
nextDesc = _("Goes to next visited link.")
bindings["next"] = ["v", settings.NO_MODIFIER_MASK, nextDesc]
return bindings
def _visitedLinkCriteria(self, collection, arg=None):
"""Returns the MatchCriteria to be used for locating visited links
by collection.
Arguments:
- collection: the collection interface for the document
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
role = [pyatspi.ROLE_LINK]
state = [pyatspi.STATE_VISITED]
stateMatch = collection.MATCH_ANY
return MatchCriteria(collection,
states=state,
matchStates=stateMatch,
roles=role)
def _visitedLinkPredicate(self, obj, arg=None):
"""The predicate to be used for verifying that the object
obj is a visited link.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
isMatch = False
if obj and obj.getRole() == pyatspi.ROLE_LINK:
isMatch = obj.getState().contains(pyatspi.STATE_VISITED)
return isMatch
def _visitedLinkPresentation(self, obj, arg=None):
"""Presents the visited link or indicates that one was not
found.
Arguments:
- obj: the accessible object under consideration.
- arg: an optional argument which may need to be included in
the criteria (e.g. the level of a heading).
"""
if obj:
obj.queryComponent().grabFocus()
else:
# Translators: this is for navigating document content by moving
# from visited link to visited link. This string is the detailed
# message which Orca will present if there are no more visited
# links found.
#
full = _("No more visited links.")
# Translators: Orca has a command that allows the user to move
# to the next structural navigation object. In Orca, "structural
# navigation" refers to quickly moving through a document by
# jumping amongst objects of a given type, such as from link to
# link, or from heading to heading, or from form field to form
# field. This is a brief message which will be presented to the
# user if the desired structural navigation object could not be
# found.
#
brief = C_("structural navigation", "Not found")
self._script.presentMessage(full, brief)
| gpl-3.0 | 3,199,177,288,072,333,300 | 39.519347 | 80 | 0.565077 | false |
3dfxsoftware/cbss-addons | account_aged_partner_balance_vw/wizard/__init__.py | 1 | 1312 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Written to OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo ([email protected])
############################################################################
# Coded by: moylop260 ([email protected])
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import wizard_open_move_line
import wizard_print_report
| gpl-2.0 | 360,285,372,285,675,100 | 45.857143 | 78 | 0.550305 | false |
kazuoteramoto/alot | alot/ui.py | 1 | 18751 | # Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import urwid
import logging
from twisted.internet import reactor, defer
from settings import settings
from buffers import BufferlistBuffer
import commands
from commands import commandfactory
from alot.commands import CommandParseError
from alot.helper import string_decode
import widgets
class InputWrap(urwid.WidgetWrap):
"""
This is the topmost widget used in the widget tree.
Its purpose is to capture and interpret keypresses
by instantiating and applying the relevant :class:`Command` objects
or relaying them to the wrapped `rootwidget`.
"""
def __init__(self, ui, rootwidget):
urwid.WidgetWrap.__init__(self, rootwidget)
self.ui = ui
self.rootwidget = rootwidget
self.select_cancel_only = False
def set_root(self, w):
self._w = w
def get_root(self):
return self._w
def allowed_command(self, cmd):
"""sanity check if the given command should be applied.
This is used in :meth:`keypress`"""
if not self.select_cancel_only:
return True
elif isinstance(cmd, commands.globals.SendKeypressCommand):
if cmd.key in ['select', 'cancel']:
return True
else:
return False
def keypress(self, size, key):
"""overwrites `urwid.WidgetWrap.keypress`"""
mode = self.ui.mode
if self.select_cancel_only:
mode = 'global'
cmdline = settings.get_keybinding(mode, key)
if cmdline:
try:
cmd = commandfactory(cmdline, mode)
if self.allowed_command(cmd):
self.ui.apply_command(cmd)
return None
except CommandParseError, e:
self.ui.notify(e.message, priority='error')
return self._w.keypress(size, key)
class UI(object):
"""
This class integrates all components of alot and offers
methods for user interaction like :meth:`prompt`, :meth:`notify` etc.
It handles the urwid widget tree and mainloop (we use twisted) and is
responsible for opening, closing and focussing buffers.
"""
buffers = []
"""list of active buffers"""
current_buffer = None
"""points to currently active :class:`~alot.buffers.Buffer`"""
dbman = None
"""Database manager (:class:`~alot.db.DBManager`)"""
def __init__(self, dbman, initialcmd):
"""
:param dbman: :class:`~alot.db.DBManager`
:param initialcmd: commandline applied after setting up interface
:type initialcmd: str
        The colour mode (1, 16 or 256 colours) is not a parameter here but is
        read from the 'colourmode' config setting.
"""
self.dbman = dbman
colourmode = int(settings.get('colourmode'))
logging.info('setup gui in %d colours' % colourmode)
global_att = settings.get_theming_attribute('global', 'body')
self.mainframe = urwid.Frame(urwid.SolidFill())
self.mainframe_themed = urwid.AttrMap(self.mainframe, global_att)
self.inputwrap = InputWrap(self, self.mainframe_themed)
self.mainloop = urwid.MainLoop(self.inputwrap,
handle_mouse=False,
event_loop=urwid.TwistedEventLoop(),
unhandled_input=self.unhandeled_input)
self.mainloop.screen.set_terminal_properties(colors=colourmode)
self.show_statusbar = settings.get('show_statusbar')
self.notificationbar = None
self.mode = 'global'
self.commandprompthistory = []
logging.debug('fire first command')
self.apply_command(initialcmd)
self.mainloop.run()
def unhandeled_input(self, key):
"""called if a keypress is not handled."""
logging.debug('unhandled input: %s' % key)
def keypress(self, key):
"""relay all keypresses to our `InputWrap`"""
self.inputwrap.keypress((150, 20), key)
def show_as_root_until_keypress(self, w, key, relay_rest=True,
afterwards=None):
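        """
        displays `w` as the root widget until `key` is pressed, then restores
        the original root widget. Other keypresses are relayed to `w` if
        `relay_rest` is set; `afterwards` is called (if callable) once the
        original root is restored.
        """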
def oe():
self.inputwrap.set_root(self.mainframe)
self.inputwrap.select_cancel_only = False
if callable(afterwards):
logging.debug('called')
afterwards()
logging.debug('relay: %s' % relay_rest)
helpwrap = widgets.CatchKeyWidgetWrap(w, key, on_catch=oe,
relay_rest=relay_rest)
self.inputwrap.set_root(helpwrap)
self.inputwrap.select_cancel_only = not relay_rest
def prompt(self, prefix, text=u'', completer=None, tab=0, history=[]):
"""prompt for text input
:param prefix: text to print before the input field
:type prefix: str
:param text: initial content of the input field
:type text: str
:param completer: completion object to use
:type completer: :meth:`alot.completion.Completer`
:param tab: number of tabs to press initially
(to select completion results)
:type tab: int
:param history: history to be used for up/down keys
:type history: list of str
:returns: a :class:`twisted.defer.Deferred`
"""
d = defer.Deferred() # create return deferred
oldroot = self.inputwrap.get_root()
def select_or_cancel(text):
# restore main screen and invoke callback
# (delayed return) with given text
self.inputwrap.set_root(oldroot)
self.inputwrap.select_cancel_only = False
d.callback(text)
prefix = prefix + settings.get('prompt_suffix')
#set up widgets
leftpart = urwid.Text(prefix, align='left')
editpart = widgets.CompleteEdit(completer, on_exit=select_or_cancel,
edit_text=text, history=history)
for i in range(tab): # hit some tabs
editpart.keypress((0,), 'tab')
# build promptwidget
both = urwid.Columns(
[
('fixed', len(prefix), leftpart),
('weight', 1, editpart),
])
att = settings.get_theming_attribute('global', 'prompt')
both = urwid.AttrMap(both, att)
# put promptwidget as overlay on main widget
overlay = urwid.Overlay(both, oldroot,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 1),
None)
self.inputwrap.set_root(overlay)
self.inputwrap.select_cancel_only = True
return d # return deferred
def exit(self):
"""
shuts down user interface without cleaning up.
Use a :class:`commands.globals.ExitCommand` for a clean shutdown.
"""
exit_msg = None
try:
reactor.stop()
except Exception as e:
exit_msg = 'Could not stop reactor: {}.'.format(e)
logging.error(exit_msg + '\nShutting down anyway..')
def buffer_open(self, buf):
"""register and focus new :class:`~alot.buffers.Buffer`."""
if self.current_buffer is not None:
offset = settings.get('bufferclose_focus_offset') * -1
currentindex = self.buffers.index(self.current_buffer)
self.buffers.insert(currentindex + offset, buf)
else:
self.buffers.append(buf)
self.buffer_focus(buf)
def buffer_close(self, buf):
"""
closes given :class:`~alot.buffers.Buffer`.
        This removes it from the bufferlist and calls its cleanup() method.
"""
buffers = self.buffers
if buf not in buffers:
string = 'tried to close unknown buffer: %s. \n\ni have:%s'
logging.error(string % (buf, self.buffers))
elif self.current_buffer == buf:
logging.info('closing current buffer %s' % buf)
index = buffers.index(buf)
buffers.remove(buf)
offset = settings.get('bufferclose_focus_offset')
nextbuffer = buffers[(index + offset) % len(buffers)]
self.buffer_focus(nextbuffer)
buf.cleanup()
else:
string = 'closing buffer %d:%s'
logging.info(string % (buffers.index(buf), buf))
buffers.remove(buf)
buf.cleanup()
def buffer_focus(self, buf):
"""focus given :class:`~alot.buffers.Buffer`."""
if buf not in self.buffers:
logging.error('tried to focus unknown buffer')
else:
if self.current_buffer != buf:
self.current_buffer = buf
self.inputwrap.set_root(self.mainframe_themed)
self.mode = buf.modename
if isinstance(self.current_buffer, BufferlistBuffer):
self.current_buffer.rebuild()
self.update()
def get_deep_focus(self, startfrom=None):
"""return the bottom most focussed widget of the widget tree"""
if not startfrom:
startfrom = self.current_buffer
if 'get_focus' in dir(startfrom):
focus = startfrom.get_focus()
if isinstance(focus, tuple):
focus = focus[0]
if isinstance(focus, urwid.Widget):
return self.get_deep_focus(startfrom=focus)
return startfrom
def get_buffers_of_type(self, t):
"""
returns currently open buffers for a given subclass of
:class:`alot.buffer.Buffer`
"""
return filter(lambda x: isinstance(x, t), self.buffers)
def clear_notify(self, messages):
"""
        clears notification popups. Call this to get rid of messages that don't
time out.
:param messages: The popups to remove. This should be exactly
what :meth:`notify` returned when creating the popup
"""
newpile = self.notificationbar.widget_list
for l in messages:
if l in newpile:
newpile.remove(l)
if newpile:
self.notificationbar = urwid.Pile(newpile)
else:
self.notificationbar = None
self.update()
def choice(self, message, choices={'y': 'yes', 'n': 'no'},
select=None, cancel=None, msg_position='above'):
"""
prompt user to make a choice
:param message: string to display before list of choices
:type message: unicode
:param choices: dict of possible choices
:type choices: dict: keymap->choice (both str)
:param select: choice to return if enter/return is hit. Ignored if set
to `None`.
:type select: str
:param cancel: choice to return if escape is hit. Ignored if set to
`None`.
:type cancel: str
:param msg_position: determines if `message` is above or left of the
prompt. Must be `above` or `left`.
:type msg_position: str
:returns: a :class:`twisted.defer.Deferred`
"""
assert select in choices.values() + [None]
assert cancel in choices.values() + [None]
assert msg_position in ['left', 'above']
d = defer.Deferred() # create return deferred
oldroot = self.inputwrap.get_root()
def select_or_cancel(text):
self.inputwrap.set_root(oldroot)
self.inputwrap.select_cancel_only = False
d.callback(text)
#set up widgets
msgpart = urwid.Text(message)
choicespart = widgets.ChoiceWidget(choices, callback=select_or_cancel,
select=select, cancel=cancel)
# build widget
if msg_position == 'left':
both = urwid.Columns(
[
('fixed', len(message), msgpart),
('weight', 1, choicespart),
], dividechars=1)
else: # above
both = urwid.Pile([msgpart, choicespart])
att = settings.get_theming_attribute('global', 'prompt')
both = urwid.AttrMap(both, att, att)
# put promptwidget as overlay on main widget
overlay = urwid.Overlay(both, oldroot,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 1),
None)
self.inputwrap.set_root(overlay)
self.inputwrap.select_cancel_only = True
return d # return deferred
def notify(self, message, priority='normal', timeout=0, block=False):
"""
opens notification popup
:param message: message to print
:type message: str
:param priority: priority string, used to format the popup: currently,
'normal' and 'error' are defined. If you use 'X' here,
the attribute 'global_notify_X' is used to format the
popup.
:type priority: str
:param timeout: seconds until message disappears. Defaults to the value
of 'notify_timeout' in the general config section.
A negative value means never time out.
:type timeout: int
:param block: this notification blocks until a keypress is made
:type block: bool
:returns: an urwid widget (this notification) that can be handed to
:meth:`clear_notify` for removal
"""
def build_line(msg, prio):
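            # wrap the message in a Columns widget and theme it according to
            # its priority ('normal', 'error', ...)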
cols = urwid.Columns([urwid.Text(msg)])
att = settings.get_theming_attribute('global', 'notify_' + prio)
return urwid.AttrMap(cols, att)
msgs = [build_line(message, priority)]
if not self.notificationbar:
self.notificationbar = urwid.Pile(msgs)
else:
newpile = self.notificationbar.widget_list + msgs
self.notificationbar = urwid.Pile(newpile)
self.update()
def clear(*args):
self.clear_notify(msgs)
if block:
# put "cancel to continue" widget as overlay on main widget
txt = urwid.Text('(cancel continues)')
overlay = urwid.Overlay(txt, self.mainframe,
('fixed left', 0),
('fixed right', 0),
('fixed bottom', 0),
None)
self.show_as_root_until_keypress(overlay, 'cancel',
relay_rest=False,
afterwards=clear)
else:
if timeout >= 0:
if timeout == 0:
timeout = settings.get('notify_timeout')
self.mainloop.set_alarm_in(timeout, clear)
return msgs[0]
def update(self):
"""redraw interface"""
#who needs a header?
#head = urwid.Text('notmuch gui')
#h=urwid.AttrMap(head, 'header')
#self.mainframe.set_header(h)
# body
if self.current_buffer:
self.mainframe.set_body(self.current_buffer)
# footer
lines = []
if self.notificationbar: # .get_text()[0] != ' ':
lines.append(self.notificationbar)
if self.show_statusbar:
lines.append(self.build_statusbar())
if lines:
self.mainframe.set_footer(urwid.Pile(lines))
else:
self.mainframe.set_footer(None)
# force a screen redraw
if self.mainloop.screen.started:
self.mainloop.draw_screen()
def build_statusbar(self):
"""construct and return statusbar widget"""
info = {}
cb = self.current_buffer
btype = None
if cb is not None:
info = cb.get_info()
btype = cb.modename
info['buffer_no'] = self.buffers.index(cb)
info['buffer_type'] = btype
info['total_messages'] = self.dbman.count_messages('*')
info['pending_writes'] = len(self.dbman.writequeue)
lefttxt = righttxt = u''
if cb is not None:
lefttxt, righttxt = settings.get(btype + '_statusbar', (u'', u''))
lefttxt = string_decode(lefttxt, 'UTF-8')
lefttxt = lefttxt.format(**info)
righttxt = string_decode(righttxt, 'UTF-8')
righttxt = righttxt.format(**info)
footerleft = urwid.Text(lefttxt, align='left')
pending_writes = len(self.dbman.writequeue)
if pending_writes > 0:
righttxt = ('|' * pending_writes) + ' ' + righttxt
footerright = urwid.Text(righttxt, align='right')
columns = urwid.Columns([
footerleft,
('fixed', len(righttxt), footerright)])
footer_att = settings.get_theming_attribute('global', 'footer')
return urwid.AttrMap(columns, footer_att)
def apply_command(self, cmd):
"""
applies a command
This calls the pre and post hooks attached to the command,
as well as :meth:`cmd.apply`.
:param cmd: an applicable command
:type cmd: :class:`~alot.commands.Command`
"""
if cmd:
# call pre- hook
if cmd.prehook:
logging.info('calling pre-hook')
try:
cmd.prehook(ui=self, dbm=self.dbman)
except:
logging.exception('prehook failed')
return False
# define (callback) function that invokes post-hook
def call_posthook(retval_from_apply):
if cmd.posthook:
logging.info('calling post-hook')
try:
cmd.posthook(ui=self, dbm=self.dbman)
except:
logging.exception('posthook failed')
# define error handler for Failures/Exceptions
# raised in cmd.apply()
def errorHandler(failure):
logging.error(failure.getTraceback())
msg = "Error: %s,\n(check the log for details)"
self.notify(msg % failure.getErrorMessage(), priority='error')
# call cmd.apply
logging.info('apply command: %s' % cmd)
d = defer.maybeDeferred(cmd.apply, self)
d.addErrback(errorHandler)
d.addCallback(call_posthook)
| gpl-3.0 | -3,591,981,657,515,845,000 | 36.880808 | 79 | 0.55997 | false |
ardoi/datajuicer | lsjuicer/ui/widgets/panels/eventpanel.py | 1 | 3918 | from PyQt5 import QtWidgets as QW
from PyQt5 import QtCore as QC
from lsjuicer.inout.db.sqla import SyntheticData
from lsjuicer.ui.widgets.fileinfowidget import MyFormLikeLayout
from lsjuicer.ui.widgets.clicktrees import EventClickTree, Events
from actionpanel import ActionPanel
from lsjuicer.ui.widgets.mergewidget import MergeDialog
from lsjuicer.ui.widgets.deletewidget import DeleteDialog
class EventPanel(ActionPanel):
__doc__ = """Event display panel"""
__shortname__ = "Events"
active_events_changed = QC.pyqtSignal()
def setup_ui(self):
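        """builds the panel widgets: region and result combo boxes, the event
        click tree and buttons for setting data, merging and deleting events."""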
layout = QW.QVBoxLayout()
combo_layout = MyFormLikeLayout()
layout.addLayout(combo_layout)
self.setLayout(layout)
self.events = None
region_select = QW.QComboBox()
for i,reg in enumerate(self.analysis.fitregions):
region_select.addItem("{}".format(i))
region_select.currentIndexChanged.connect(self.region_changed)
combo_layout.add_row("Region:", region_select)
result_select = QW.QComboBox()
combo_layout.add_row("Result:", result_select)
self.result_select = result_select
result_select.currentIndexChanged.connect(self.result_changed)
clicktree = EventClickTree(self)
self.clicktree = clicktree
layout.addWidget(clicktree)
region_select.setCurrentIndex(0)
self.region_changed(0)
set_data_pb = QW.QPushButton("Set data")
set_data_pb.clicked.connect(self.set_data)
merge_pb = QW.QPushButton("Merge events")
merge_pb.clicked.connect(self.merge_events)
delete_pb = QW.QPushButton("Delete events")
delete_pb.clicked.connect(self.delete_events)
layout.addWidget(set_data_pb)
layout.addWidget(merge_pb)
layout.addWidget(delete_pb)
def _selected_events(self):
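        """return the ids of all events currently checked in the click tree."""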
selected_events = []
for event_type in self.events.event_dict:
for i, event in enumerate(self.events.event_dict[event_type]):
status = self.events.status_dict[event_type][i]
if status:
selected_events.append(event.id)
return selected_events
def set_data(self):
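        """replace channel 2 of the image data with synthetic data built from
        the selected events and emit `active_events_changed`."""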
events_to_show = self._selected_events()
sdata = SyntheticData(self.result)
new = sdata.get_events(events_to_show)
self.imagedata.replace_channel(new, 2)
self.active_events_changed.emit()
def merge_events(self):
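        """open a MergeDialog for the selected events (at least two are
        required) and reload the current result if the dialog is accepted."""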
events_to_merge = self._selected_events()
if len(events_to_merge) < 2:
QW.QMessageBox.warning(self,'Not enough events',
"At least two events have to be selected for merging")
return
dialog = MergeDialog(events_to_merge,self)
res = dialog.exec_()
if res:
self.result_changed(self.result_select.currentIndex())
def delete_events(self):
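        """open a DeleteDialog for the selected events (at least one is
        required) and reload the current result if the dialog is accepted."""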
events_to_delete = self._selected_events()
if len(events_to_delete) < 1:
QW.QMessageBox.warning(self,'Not enough events',
"At least one event has to be selected for deletion")
return
dialog = DeleteDialog(events_to_delete,self)
res = dialog.exec_()
if res:
self.result_changed(self.result_select.currentIndex())
def region_changed(self, reg_no):
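        """slot called when another fit region is selected; repopulates the
        result combo box from the region's results."""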
print "\nREgion changed"
self.region = self.analysis.fitregions[reg_no]
self.result_select.clear()
print reg_no, self.region
for i,res in enumerate(self.region.results):
self.result_select.addItem(str(i))
def result_changed(self, res_no):
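        """slot called when another result is selected; rebuilds the Events
        container and refreshes the click tree."""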
print "\nResult changed"
self.result = self.region.results[res_no]
print res_no, self.result
self.events = Events()
for ev in self.result.events:
self.events.add_event(ev)
self.clicktree.set_events(self.events)
| gpl-3.0 | 376,748,179,821,630,000 | 36.673077 | 74 | 0.639612 | false |
nicholas-maltbie/Medina | AIPractice/tttTest.py | 1 | 1416 | from ttt import *
from tttGameSpec import TicTacToeGameSpec
def play_game(agent1, agent2, name1, name2):
"""Plays a game of tic tac toe with two agents and returns the winner."""
game_spec = TicTacToeGameSpec()
return game_spec.play_game(agent1, agent2)
"""board = make_board()
names = [name1, name2]
players = [agent1, agent2]
pieces = [-1,1]
current = random.randint(0,1)
while check_winner(board) == None:
print(get_board_as_numbers(board, pieces[current], pieces[(current + 1) % 2]))
move = players[current](board, pieces[current])
apply_move(board, move)
current = (current + 1) % 2
win = check_winner(board)
if win == 'o':
return name2
elif win == 'x':
return name1
else:
return 'tie'"""
if __name__ == "__main__":
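    # play `plays` games between a random agent and a human agent and report
    # how often each player (or a tie) won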
distrib = {'player1':0, 'player2':0, 'tie':0}
plays = 1
for i in range(plays):
distrib[play_game(make_random_agent(), make_human_agent(), \
'player1', 'player2')] += 1;
print('player1 won ' + str(distrib['player1']) + ' times ' + \
str(int(distrib['player1'] / plays * 100)) + "%")
print('player2 won ' + str(distrib['player2']) + ' times ' + \
str(int(distrib['player2'] / plays * 100)) + "%")
print('tied ' + str(distrib['tie']) + ' times ' + \
str(int(distrib['tie'] / plays * 100)) + "%")
| mit | 6,103,644,358,303,188,000 | 36.263158 | 86 | 0.558616 | false |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_06_01/aio/operations/_app_service_environments_operations.py | 1 | 167660 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AppServiceEnvironmentsOperations:
"""AppServiceEnvironmentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.AppServiceEnvironmentCollection"]:
"""Get all App Service Environments for a subscription.
Description for Get all App Service Environments for a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceEnvironmentCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceEnvironmentCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AppServiceEnvironmentCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/hostingEnvironments'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.AppServiceEnvironmentCollection"]:
"""Get all App Service Environments in a resource group.
Description for Get all App Service Environments in a resource group.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServiceEnvironmentCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceEnvironmentCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AppServiceEnvironmentCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments'} # type: ignore
async def get(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.AppServiceEnvironmentResource":
"""Get the properties of an App Service Environment.
Description for Get the properties of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceEnvironmentResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceEnvironmentResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
name: str,
hosting_environment_envelope: "_models.AppServiceEnvironmentResource",
**kwargs: Any
) -> "_models.AppServiceEnvironmentResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceEnvironmentResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(hosting_environment_envelope, 'AppServiceEnvironmentResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
name: str,
hosting_environment_envelope: "_models.AppServiceEnvironmentResource",
**kwargs: Any
) -> AsyncLROPoller["_models.AppServiceEnvironmentResource"]:
"""Create or update an App Service Environment.
Description for Create or update an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param hosting_environment_envelope: Configuration details of the App Service Environment.
:type hosting_environment_envelope: ~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AppServiceEnvironmentResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceEnvironmentResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
name=name,
hosting_environment_envelope=hosting_environment_envelope,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
name: str,
force_delete: Optional[bool] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if force_delete is not None:
query_parameters['forceDelete'] = self._serialize.query("force_delete", force_delete, 'bool')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
name: str,
force_delete: Optional[bool] = None,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete an App Service Environment.
Description for Delete an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param force_delete: Specify :code:`<code>true</code>` to force the deletion even if the App
Service Environment contains resources. The default is :code:`<code>false</code>`.
:type force_delete: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
name=name,
force_delete=force_delete,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'} # type: ignore
async def update(
self,
resource_group_name: str,
name: str,
hosting_environment_envelope: "_models.AppServiceEnvironmentPatchResource",
**kwargs: Any
) -> "_models.AppServiceEnvironmentResource":
"""Create or update an App Service Environment.
Description for Create or update an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param hosting_environment_envelope: Configuration details of the App Service Environment.
:type hosting_environment_envelope: ~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentPatchResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AppServiceEnvironmentResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.AppServiceEnvironmentResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServiceEnvironmentResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(hosting_environment_envelope, 'AppServiceEnvironmentPatchResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('AppServiceEnvironmentResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}'} # type: ignore
def list_capacities(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.StampCapacityCollection"]:
"""Get the used, available, and total worker capacity an App Service Environment.
Description for Get the used, available, and total worker capacity an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StampCapacityCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.StampCapacityCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StampCapacityCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_capacities.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StampCapacityCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_capacities.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/compute'} # type: ignore
async def get_vip_info(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.AddressResponse":
"""Get IP addresses assigned to an App Service Environment.
Description for Get IP addresses assigned to an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AddressResponse, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.AddressResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AddressResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get_vip_info.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AddressResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_vip_info.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/capacities/virtualip'} # type: ignore
async def _change_vnet_initial(
self,
resource_group_name: str,
name: str,
vnet_info: "_models.VirtualNetworkProfile",
**kwargs: Any
) -> "_models.WebAppCollection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._change_vnet_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vnet_info, 'VirtualNetworkProfile')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebAppCollection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WebAppCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_change_vnet_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/changeVirtualNetwork'} # type: ignore
async def begin_change_vnet(
self,
resource_group_name: str,
name: str,
vnet_info: "_models.VirtualNetworkProfile",
**kwargs: Any
) -> AsyncLROPoller[AsyncItemPaged["_models.WebAppCollection"]]:
"""Move an App Service Environment to a different VNET.
Description for Move an App Service Environment to a different VNET.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param vnet_info: Details for the new virtual network.
:type vnet_info: ~azure.mgmt.web.v2020_06_01.models.VirtualNetworkProfile
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns an iterator like instance of either WebAppCollection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.WebAppCollection]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = "application/json"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.change_vnet.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vnet_info, 'VirtualNetworkProfile')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vnet_info, 'VirtualNetworkProfile')
body_content_kwargs['content'] = body_content
request = self._client.get(url, query_parameters, header_parameters, **body_content_kwargs)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebAppCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._change_vnet_initial(
resource_group_name=resource_group_name,
name=name,
vnet_info=vnet_info,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
async def internal_get_next(next_link=None):
if next_link is None:
return pipeline_response
else:
return await get_next(next_link)
return AsyncItemPaged(
internal_get_next, extract_data
)
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_change_vnet.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/changeVirtualNetwork'} # type: ignore
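    # --- Illustrative usage (hedged sketch, not part of the generated client) ---
    # begin_change_vnet is a long-running operation whose final result is itself an
    # async pager of WebAppCollection pages. A minimal call might look like the
    # sketch below; the credential, subscription id, resource names and VNet id are
    # placeholders/assumptions, not values taken from this module.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.web.aio import WebSiteManagementClient
    #   from azure.mgmt.web.v2020_06_01.models import VirtualNetworkProfile
    #
    #   async def change_ase_vnet():
    #       async with WebSiteManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #           poller = await client.app_service_environments.begin_change_vnet(
    #               resource_group_name="example-rg",
    #               name="example-ase",
    #               vnet_info=VirtualNetworkProfile(id="<vnet-resource-id>"),
    #           )
    #           pages = await poller.result()   # resolves to an AsyncItemPaged
    #           async for site in pages:
    #               print(site.name)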
async def list_diagnostics(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List["_models.HostingEnvironmentDiagnostics"]:
"""Get diagnostic information for an App Service Environment.
Description for Get diagnostic information for an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of HostingEnvironmentDiagnostics, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentDiagnostics]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.HostingEnvironmentDiagnostics"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.list_diagnostics.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[HostingEnvironmentDiagnostics]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_diagnostics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics'} # type: ignore
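    # --- Illustrative usage (hedged sketch, not part of the generated client) ---
    # list_diagnostics returns a plain list of HostingEnvironmentDiagnostics rather
    # than a pager, so the awaited result can be iterated directly. The resource
    # names below are placeholder assumptions.
    #
    #   async def show_ase_diagnostics(client):          # client: WebSiteManagementClient (aio)
    #       diagnostics = await client.app_service_environments.list_diagnostics(
    #           resource_group_name="example-rg", name="example-ase")
    #       for item in diagnostics:
    #           print(item.name)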
async def get_diagnostics_item(
self,
resource_group_name: str,
name: str,
diagnostics_name: str,
**kwargs: Any
) -> "_models.HostingEnvironmentDiagnostics":
"""Get a diagnostics item for an App Service Environment.
Description for Get a diagnostics item for an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param diagnostics_name: Name of the diagnostics item.
:type diagnostics_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HostingEnvironmentDiagnostics, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.HostingEnvironmentDiagnostics
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HostingEnvironmentDiagnostics"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get_diagnostics_item.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'diagnosticsName': self._serialize.url("diagnostics_name", diagnostics_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HostingEnvironmentDiagnostics', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_diagnostics_item.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/diagnostics/{diagnosticsName}'} # type: ignore
def get_inbound_network_dependencies_endpoints(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.InboundEnvironmentEndpointCollection"]:
"""Get the network endpoints of all inbound dependencies of an App Service Environment.
Description for Get the network endpoints of all inbound dependencies of an App Service
Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InboundEnvironmentEndpointCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.InboundEnvironmentEndpointCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.InboundEnvironmentEndpointCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_inbound_network_dependencies_endpoints.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('InboundEnvironmentEndpointCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_inbound_network_dependencies_endpoints.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/inboundNetworkDependenciesEndpoints'} # type: ignore
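    # --- Illustrative usage (hedged sketch, not part of the generated client) ---
    # Paged operations such as this one return an AsyncItemPaged immediately (the
    # call itself is not awaited); pages are fetched lazily during async iteration.
    # The resource names below are placeholder assumptions.
    #
    #   async def show_inbound_endpoints(client):        # client: WebSiteManagementClient (aio)
    #       endpoints = client.app_service_environments.get_inbound_network_dependencies_endpoints(
    #           resource_group_name="example-rg", name="example-ase")
    #       async for endpoint in endpoints:
    #           print(endpoint.description, endpoint.endpoints)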
def list_multi_role_pools(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.WorkerPoolCollection"]:
"""Get all multi-role pools.
Description for Get all multi-role pools.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WorkerPoolCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.WorkerPoolCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_multi_role_pools.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WorkerPoolCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_multi_role_pools.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools'} # type: ignore
async def get_multi_role_pool(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.WorkerPoolResource":
"""Get properties of a multi-role pool.
Description for Get properties of a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get_multi_role_pool.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_multi_role_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default'} # type: ignore
async def _create_or_update_multi_role_pool_initial(
self,
resource_group_name: str,
name: str,
multi_role_pool_envelope: "_models.WorkerPoolResource",
**kwargs: Any
) -> "_models.WorkerPoolResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_multi_role_pool_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(multi_role_pool_envelope, 'WorkerPoolResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_multi_role_pool_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default'} # type: ignore
async def begin_create_or_update_multi_role_pool(
self,
resource_group_name: str,
name: str,
multi_role_pool_envelope: "_models.WorkerPoolResource",
**kwargs: Any
) -> AsyncLROPoller["_models.WorkerPoolResource"]:
"""Create or update a multi-role pool.
Description for Create or update a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param multi_role_pool_envelope: Properties of the multi-role pool.
:type multi_role_pool_envelope: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WorkerPoolResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_multi_role_pool_initial(
resource_group_name=resource_group_name,
name=name,
multi_role_pool_envelope=multi_role_pool_envelope,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_multi_role_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default'} # type: ignore
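    # --- Illustrative usage (hedged sketch, not part of the generated client) ---
    # begin_create_or_update_multi_role_pool returns an AsyncLROPoller; awaiting
    # poller.result() yields the final WorkerPoolResource. The worker size and
    # instance count below are assumptions chosen for illustration.
    #
    #   from azure.mgmt.web.v2020_06_01.models import WorkerPoolResource
    #
    #   async def scale_multi_role_pool(client):         # client: WebSiteManagementClient (aio)
    #       poller = await client.app_service_environments.begin_create_or_update_multi_role_pool(
    #           resource_group_name="example-rg",
    #           name="example-ase",
    #           multi_role_pool_envelope=WorkerPoolResource(worker_size="Medium", worker_count=3),
    #       )
    #       pool = await poller.result()
    #       print(pool.worker_size, pool.worker_count)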
async def update_multi_role_pool(
self,
resource_group_name: str,
name: str,
multi_role_pool_envelope: "_models.WorkerPoolResource",
**kwargs: Any
) -> "_models.WorkerPoolResource":
"""Create or update a multi-role pool.
Description for Create or update a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param multi_role_pool_envelope: Properties of the multi-role pool.
:type multi_role_pool_envelope: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_multi_role_pool.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(multi_role_pool_envelope, 'WorkerPoolResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_multi_role_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default'} # type: ignore
def list_multi_role_pool_instance_metric_definitions(
self,
resource_group_name: str,
name: str,
instance: str,
**kwargs: Any
) -> AsyncIterable["_models.ResourceMetricDefinitionCollection"]:
"""Get metric definitions for a specific instance of a multi-role pool of an App Service Environment.
Description for Get metric definitions for a specific instance of a multi-role pool of an App
Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param instance: Name of the instance in the multi-role pool.
:type instance: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceMetricDefinitionCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_multi_role_pool_instance_metric_definitions.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'instance': self._serialize.url("instance", instance, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceMetricDefinitionCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_multi_role_pool_instance_metric_definitions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/instances/{instance}/metricdefinitions'} # type: ignore
def list_multi_role_metric_definitions(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.ResourceMetricDefinitionCollection"]:
"""Get metric definitions for a multi-role pool of an App Service Environment.
Description for Get metric definitions for a multi-role pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceMetricDefinitionCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_multi_role_metric_definitions.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceMetricDefinitionCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_multi_role_metric_definitions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/metricdefinitions'} # type: ignore
def list_multi_role_pool_skus(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.SkuInfoCollection"]:
"""Get available SKUs for scaling a multi-role pool.
Description for Get available SKUs for scaling a multi-role pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuInfoCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.SkuInfoCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SkuInfoCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_multi_role_pool_skus.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SkuInfoCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_multi_role_pool_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/skus'} # type: ignore
def list_multi_role_usages(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.UsageCollection"]:
"""Get usage metrics for a multi-role pool of an App Service Environment.
Description for Get usage metrics for a multi-role pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.UsageCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_multi_role_usages.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsageCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_multi_role_usages.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/multiRolePools/default/usages'} # type: ignore
async def list_operations(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> List["_models.Operation"]:
"""List all currently running operations on the App Service Environment.
Description for List all currently running operations on the App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of Operation, or the result of cls(response)
:rtype: list[~azure.mgmt.web.v2020_06_01.models.Operation]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.Operation"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.list_operations.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[Operation]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_operations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/operations'} # type: ignore
def get_outbound_network_dependencies_endpoints(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.OutboundEnvironmentEndpointCollection"]:
"""Get the network endpoints of all outbound dependencies of an App Service Environment.
Description for Get the network endpoints of all outbound dependencies of an App Service
Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OutboundEnvironmentEndpointCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.OutboundEnvironmentEndpointCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.OutboundEnvironmentEndpointCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_outbound_network_dependencies_endpoints.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OutboundEnvironmentEndpointCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_outbound_network_dependencies_endpoints.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/outboundNetworkDependenciesEndpoints'} # type: ignore
async def reboot(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> None:
"""Reboot all machines in an App Service Environment.
Description for Reboot all machines in an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.reboot.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
reboot.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/reboot'} # type: ignore
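    # --- Illustrative usage (hedged sketch, not part of the generated client) ---
    # reboot returns no body (HTTP 202 is the only success code), so awaiting the
    # call is enough. The resource names below are placeholder assumptions.
    #
    #   async def reboot_ase(client):                    # client: WebSiteManagementClient (aio)
    #       await client.app_service_environments.reboot(
    #           resource_group_name="example-rg", name="example-ase")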
async def _resume_initial(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.WebAppCollection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._resume_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebAppCollection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WebAppCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_resume_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume'} # type: ignore
async def begin_resume(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncLROPoller[AsyncItemPaged["_models.WebAppCollection"]]:
"""Resume an App Service Environment.
Description for Resume an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns an iterator like instance of either WebAppCollection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.WebAppCollection]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
                url = self.begin_resume.metadata['url']  # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebAppCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._resume_initial(
resource_group_name=resource_group_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
async def internal_get_next(next_link=None):
if next_link is None:
return pipeline_response
else:
return await get_next(next_link)
return AsyncItemPaged(
internal_get_next, extract_data
)
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resume.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/resume'} # type: ignore
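    # --- Illustrative usage (hedged sketch, not part of the generated client) ---
    # Like begin_change_vnet above, begin_resume is an LRO whose final result is an
    # async pager over the apps in the environment. The resource names below are
    # placeholder assumptions.
    #
    #   async def resume_ase(client):                    # client: WebSiteManagementClient (aio)
    #       poller = await client.app_service_environments.begin_resume(
    #           resource_group_name="example-rg", name="example-ase")
    #       sites = await poller.result()                # AsyncItemPaged of Site objects
    #       async for site in sites:
    #           print(site.name, site.state)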
def list_app_service_plans(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.AppServicePlanCollection"]:
"""Get all App Service plans in an App Service Environment.
Description for Get all App Service plans in an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AppServicePlanCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.AppServicePlanCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AppServicePlanCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_app_service_plans.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AppServicePlanCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_app_service_plans.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/serverfarms'} # type: ignore
def list_web_apps(
self,
resource_group_name: str,
name: str,
properties_to_include: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.WebAppCollection"]:
"""Get all apps in an App Service Environment.
Description for Get all apps in an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param properties_to_include: Comma separated list of app properties to include.
:type properties_to_include: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WebAppCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.WebAppCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_web_apps.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if properties_to_include is not None:
query_parameters['propertiesToInclude'] = self._serialize.query("properties_to_include", properties_to_include, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebAppCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_web_apps.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/sites'} # type: ignore
async def _suspend_initial(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.WebAppCollection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._suspend_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WebAppCollection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WebAppCollection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_suspend_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend'} # type: ignore
async def begin_suspend(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncLROPoller[AsyncItemPaged["_models.WebAppCollection"]]:
"""Suspend an App Service Environment.
Description for Suspend an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns an iterator like instance of either WebAppCollection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.WebAppCollection]]
:raises ~azure.core.exceptions.HttpResponseError:
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
                url = self.begin_suspend.metadata['url']  # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WebAppCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WebAppCollection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._suspend_initial(
resource_group_name=resource_group_name,
name=name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
async def internal_get_next(next_link=None):
if next_link is None:
return pipeline_response
else:
return await get_next(next_link)
return AsyncItemPaged(
internal_get_next, extract_data
)
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_suspend.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/suspend'} # type: ignore
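    # Illustrative usage sketch (not part of the generated code; assumes an
    # authenticated async client named `client`). begin_suspend returns an
    # AsyncLROPoller whose result is itself an AsyncItemPaged of the suspended apps:
    #
    #     poller = await client.app_service_environments.begin_suspend("my-rg", "my-ase")
    #     suspended = await poller.result()
    #     async for site in suspended:
    #         print(site.name)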
def list_usages(
self,
resource_group_name: str,
name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.CsmUsageQuotaCollection"]:
"""Get global usage metrics of an App Service Environment.
Description for Get global usage metrics of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param filter: Return only usages/metrics specified in the filter. Filter conforms to odata
syntax. Example: $filter=(name.value eq 'Metric1' or name.value eq 'Metric2') and startTime eq
2014-01-01T00:00:00Z and endTime eq 2014-12-31T23:59:59Z and timeGrain eq
duration'[Hour|Minute|Day]'.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CsmUsageQuotaCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.CsmUsageQuotaCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CsmUsageQuotaCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_usages.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CsmUsageQuotaCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_usages.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/usages'} # type: ignore
def list_worker_pools(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.WorkerPoolCollection"]:
"""Get all worker pools of an App Service Environment.
Description for Get all worker pools of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WorkerPoolCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.WorkerPoolCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_worker_pools.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('WorkerPoolCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_worker_pools.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools'} # type: ignore
async def get_worker_pool(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> "_models.WorkerPoolResource":
"""Get properties of a worker pool.
Description for Get properties of a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get_worker_pool.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_worker_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}'} # type: ignore
async def _create_or_update_worker_pool_initial(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
worker_pool_envelope: "_models.WorkerPoolResource",
**kwargs: Any
) -> "_models.WorkerPoolResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_worker_pool_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(worker_pool_envelope, 'WorkerPoolResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_worker_pool_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}'} # type: ignore
async def begin_create_or_update_worker_pool(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
worker_pool_envelope: "_models.WorkerPoolResource",
**kwargs: Any
) -> AsyncLROPoller["_models.WorkerPoolResource"]:
"""Create or update a worker pool.
Description for Create or update a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:param worker_pool_envelope: Properties of the worker pool.
:type worker_pool_envelope: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WorkerPoolResource or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_worker_pool_initial(
resource_group_name=resource_group_name,
name=name,
worker_pool_name=worker_pool_name,
worker_pool_envelope=worker_pool_envelope,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_worker_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}'} # type: ignore
async def update_worker_pool(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
worker_pool_envelope: "_models.WorkerPoolResource",
**kwargs: Any
) -> "_models.WorkerPoolResource":
"""Create or update a worker pool.
Description for Create or update a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:param worker_pool_envelope: Properties of the worker pool.
:type worker_pool_envelope: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkerPoolResource, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_06_01.models.WorkerPoolResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkerPoolResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_worker_pool.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(worker_pool_envelope, 'WorkerPoolResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('WorkerPoolResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_worker_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}'} # type: ignore
def list_worker_pool_instance_metric_definitions(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
instance: str,
**kwargs: Any
) -> AsyncIterable["_models.ResourceMetricDefinitionCollection"]:
"""Get metric definitions for a specific instance of a worker pool of an App Service Environment.
Description for Get metric definitions for a specific instance of a worker pool of an App
Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:param instance: Name of the instance in the worker pool.
:type instance: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceMetricDefinitionCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_worker_pool_instance_metric_definitions.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'instance': self._serialize.url("instance", instance, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceMetricDefinitionCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_worker_pool_instance_metric_definitions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/instances/{instance}/metricdefinitions'} # type: ignore
def list_web_worker_metric_definitions(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ResourceMetricDefinitionCollection"]:
"""Get metric definitions for a worker pool of an App Service Environment.
Description for Get metric definitions for a worker pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceMetricDefinitionCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.ResourceMetricDefinitionCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceMetricDefinitionCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_web_worker_metric_definitions.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceMetricDefinitionCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_web_worker_metric_definitions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/metricdefinitions'} # type: ignore
def list_worker_pool_skus(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SkuInfoCollection"]:
"""Get available SKUs for scaling a worker pool.
Description for Get available SKUs for scaling a worker pool.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuInfoCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.SkuInfoCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SkuInfoCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_worker_pool_skus.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SkuInfoCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_worker_pool_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/skus'} # type: ignore
def list_web_worker_usages(
self,
resource_group_name: str,
name: str,
worker_pool_name: str,
**kwargs: Any
) -> AsyncIterable["_models.UsageCollection"]:
"""Get usage metrics for a worker pool of an App Service Environment.
Description for Get usage metrics for a worker pool of an App Service Environment.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the App Service Environment.
:type name: str
:param worker_pool_name: Name of the worker pool.
:type worker_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either UsageCollection or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_06_01.models.UsageCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.UsageCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_web_worker_usages.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'workerPoolName': self._serialize.url("worker_pool_name", worker_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('UsageCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_web_worker_usages.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/hostingEnvironments/{name}/workerPools/{workerPoolName}/usages'} # type: ignore
| mit | 2,373,434,903,715,900,400 | 50.651263 | 272 | 0.634325 | false |
crs4/hl7apy | hl7apy/v2_8_2/__init__.py | 1 | 3070 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2018, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import importlib
from hl7apy.base_datatypes import WD
from .messages import MESSAGES
from .segments import SEGMENTS
from .fields import FIELDS
from .datatypes import DATATYPES, DATATYPES_STRUCTS
from .groups import GROUPS
from .tables import TABLES
from ..v2_7.base_datatypes import ST, FT, ID, IS, TX, GTS, SNM
from hl7apy.exceptions import ChildNotFound
ELEMENTS = {'Message': MESSAGES, 'Group': GROUPS, 'Segment': SEGMENTS,
'Field': FIELDS, 'Component': DATATYPES, 'SubComponent': DATATYPES,
'Datatypes_Structs': DATATYPES_STRUCTS, 'Table': TABLES}
def get(name, element_type):
try:
return ELEMENTS[element_type][name]
except KeyError:
raise ChildNotFound(name)
def find(name, where):
"""
>>> from hl7apy.core import Segment
>>> from hl7apy import find_reference
>>> find_reference('UNKNOWN', (Segment, ), '2.8.2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ChildNotFound: No child named UNKNOWN
"""
for cls in where:
try:
return {'ref': get(name, cls.__name__), 'name': name, 'cls': cls}
except ChildNotFound:
pass
raise ChildNotFound(name)
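# Illustrative example (assuming 'PID' is defined in this version's SEGMENTS):
# find('PID', (Segment,)) -> {'ref': <segment structure>, 'name': 'PID', 'cls': Segment}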
def is_base_datatype(datatype):
return datatype in BASE_DATATYPES
def get_base_datatypes():
return BASE_DATATYPES
def _load_base_datatypes():
base_dts = ('DT', 'DTM', 'NM', 'SI', 'TM',)
module = importlib.import_module("hl7apy.base_datatypes")
dts = {}
for cls in base_dts:
cls = getattr(module, cls)
dts[cls.__name__] = cls
return dts
BASE_DATATYPES = _load_base_datatypes()
BASE_DATATYPES.update({
'ST': ST,
'FT': FT,
'ID': ID,
'IS': IS,
'TX': TX,
'GTS': GTS,
'SNM': SNM,
'WD': WD,
})
DT = BASE_DATATYPES['DT']
DTM = BASE_DATATYPES['DTM']
NM = BASE_DATATYPES['NM']
SI = BASE_DATATYPES['SI']
TM = BASE_DATATYPES['TM']
| mit | 2,264,156,724,461,546,500 | 30.010101 | 92 | 0.684691 | false |
DTL-FAIRData/ODEX4all-UseCases | EKP/tmp/NIZO2.py | 1 | 5816 | # Load the required packages
import EKP
import csv
import os
import datetime
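# This script queries the knowledge platform (via the EKP helper module) for direct
# and indirect relations between a list of commensal gut bacteria and gut-related
# endpoint concepts, and writes the indirect paths with their supporting
# publications to a CSV file.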
# Knowledge platform URL
url = ''
# User credentials: Please fill in!
username = ''
password = ''
# Set the output directory
os.chdir("NIZO input & Output/")
# Get the user token, required for access
t = EKP.getToken(username, password, url).json()['token']
# Get the semantic types contained in the database, and their codes
Types = EKP.getSemanticTypeDict(url, t)
# Read in the input file
input_file = open("List commensal species Qin et al 19_10_2015.csv", "r")
reader = csv.reader(input_file, delimiter=";")
commensals = []
for line in reader:
commensals.append(line[0])
input_file.close()
input_group = "Bacterium"
input_ids = {}
for c in commensals:
ID = EKP.getID(url, Types, t, c, input_group)
if len(ID) > 0:
input_ids.update({ID[0]['name']: ID[0]['id']})
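# Endpoint concepts of interest, specified as UMLS-style concept identifiers (CUIs);
# they are resolved to platform concept IDs below.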
endpoints = {"Gut dysmotility" : "C1839757",
"bowel/gut problem" : "C1656426",
"Inflammatory Bowel Diseases" : "C0021390",
"Intestinal mucosal permeability" : "C0232645",
"Permeability" : "C0232645",
"body barrier" : "C0682585"
}
intermediate_types = { "Food" : "Objects",
"Organ or Tissue Function" : "Physiology",
#"Gene or Genome" : "Genes & Molecular Sequences",
"Finding" : "Disorders",
"Disease or Syndrome" : "Disorders",
"Chemical Viewed Functionally" : "Chemicals & Drugs",
"Biologically Active Substance" : "Chemicals & Drugs",
"Tissue" : "Anatomy",
"Body Location or Region" : "Anatomy",
"Body Part, Organ, or Organ Component" : "Anatomy",
"Body Space or Junction" : "Anatomy",
"Body System" : "Anatomy",
"Cell" : "Anatomy"
}
# Use all concepts related to the gut as a filter
gut = EKP.getID(url, Types, t, "C0699819")
intestines = EKP.getID(url, Types, t, "C0021853")
endpoint_ids = []
for point in endpoints.values():
endpoint_ids.append(EKP.getID(url, Types, t, point)[0]['id'])
endpoint_ids = list(set(endpoint_ids))
for input in input_ids.values():
print(EKP.getRelationships([input], endpoint_ids, url, t))
indirect_all = []
gut_all = []
intestines_all = []
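# For each intermediate semantic type: collect concepts directly connected to the
# gut/intestines concepts (used later to flag connecting concepts) and query
# indirect paths from the commensal bacteria to the endpoint concepts.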
for key, value in intermediate_types.items():
gut_connected = EKP.getDirectlyConnectedConcepts(Types, t, url, [gut[0]['id']], value, key)
if 'content' in gut_connected.keys() and len(gut_connected['content']) > 0:
for g in gut_connected['content']:
gut_all.append(g['tier1Concept']['gi'])
intestines_connected = EKP.getDirectlyConnectedConcepts(Types, t, url, [intestines[0]['id']], value, key)
if 'content' in intestines_connected.keys() and len(intestines_connected['content']) > 0:
for g in intestines_connected['content']:
intestines_all.append(g['tier1Concept']['gi'])
response = EKP.getIndirectRelationships(list(input_ids.values()), endpoint_ids, Types, url, t, value, key)
print(response)
if 'content' in response.keys():
indirect_all.append(response['content'])
indirect_out = open("indirect_output_" + datetime.datetime.today().strftime("%Y_%m_%d") + ".csv", "w")
iw = csv.writer(indirect_out, delimiter = ";")
iw.writerow(["Starting concept", "Predicate1", "Sources1", "Connecting concept", "Semantic category", "Semantic types", "Found in gut?", "Found in intestines?", "Predicate2", "Sources2", "End concept", "Path weight"])
indirect_all2 = []
for ii in indirect_all:
indirect_all2 = indirect_all2 + ii
for i in indirect_all2:
start = i['tier0Concept']['name']
intermediate = i['tier1Concept']['name']
intermediate_cat = i['tier1Concept']['category']
intermediate_concept = EKP.getConcept(i['tier1Concept']['gi'], url, t)
output_STs = []
for g in intermediate_concept['semanticTypes']:
for key, value in Types[0].items():
if g == value:
output_STs.append(key)
    # Logic to filter on gut & intestines
if i['tier1Concept']['gi'] in gut_all:
gut_bool = "gut"
if i['tier1Concept']['gi'] not in gut_all:
gut_bool = "no"
if i['tier1Concept']['gi'] in intestines_all:
intestines_bool = "intestines"
if i['tier1Concept']['gi'] not in intestines_all:
intestines_bool = "no"
end = i['tier2Concept']['name']
pw = i['pathWeight']
nrows = max([len(i['tier01TripleInformation']), len(i['tier12TripleInformation'])])
pubs1 = []
pubs2 = []
for w in range(0,nrows):
if w <= len(i['tier01TripleInformation']) - 1:
predicate1 = i['tier01TripleInformation'][w]['predicateName']
pub_info = EKP.getPublications(i['tier01TripleInformation'][w]['tripleUuid'], url, t)
for p1 in pub_info['publications']:
if p1['publicationInfo'] is not None and 'url' in p1['publicationInfo'].keys():
pubs1.append(p1['publicationInfo']['url'])
if w <= len(i['tier12TripleInformation']) - 1:
predicate2 = i['tier12TripleInformation'][w]['predicateName']
pub_info2 = EKP.getPublications(i['tier12TripleInformation'][w]['tripleUuid'], url, t)
for p2 in pub_info2['publications']:
if p2['publicationInfo'] is not None and 'url' in p2['publicationInfo'].keys():
pubs2.append(p2['publicationInfo']['url'])
iw.writerow([start, predicate1, pubs1, intermediate, intermediate_cat, output_STs, gut_bool, intestines_bool, predicate2, pubs2, end, pw])
indirect_out.close()
| mit | -8,227,296,890,107,231,000 | 39.388889 | 217 | 0.607634 | false |
Khurramjaved96/Recursive-CNNs | data_augmentor/augmentData.py | 1 | 2668 | import os
import cv2
import numpy as np
import utils
def argsProcessor():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--dataPath", help="DataPath")
parser.add_argument("-o", "--outputFiles", help="outputFiles", default="bar")
return parser.parse_args()
args = argsProcessor()
output_dir = args.outputFiles
if (not os.path.isdir(output_dir)):
os.mkdir(output_dir)
dir = args.dataPath
import csv
with open(output_dir+"/gt.csv", 'a') as csvfile:
spamwriter_1 = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
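    # For every image that has a matching corner-point CSV: normalize the ground
    # truth to a 1080x1080 resize, then generate 4 rotations x 16 random crops,
    # resize each crop to 64x64 and record it with its corner labels in gt.csv.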
for image in os.listdir(dir):
if image.endswith("jpg") or image.endswith("JPG"):
if os.path.isfile(dir+"/"+image+".csv"):
with open(dir+"/"+image+ ".csv", 'r') as csvfile:
spamwriter = csv.reader(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
img = cv2.imread(dir +"/"+ image)
print (image)
gt= []
for row in spamwriter:
gt.append(row)
# img = cv2.circle(img, (int(float(row[0])), int(float(row[1]))), 2,(255,0,0),90)
gt =np.array(gt).astype(np.float32)
gt = gt / (img.shape[1], img.shape[0])
gt = gt * (1080, 1080)
img = cv2.resize(img, (1080, 1080))
print (gt)
for angle in range(0,271,90):
img_rotate, gt_rotate = utils.rotate(img, gt, angle)
for random_crop in range(0,16):
img_crop, gt_crop = utils.random_crop(img_rotate, gt_rotate)
mah_size = img_crop.shape
img_crop = cv2.resize(img_crop, (64, 64))
gt_crop = np.array(gt_crop)
# gt_crop = gt_crop*(1.0 / mah_size[1],1.0 / mah_size[0])
# for a in range(0,4):
# no=0
# for a in range(0,4):
# no+=1
# cv2.circle(img_crop, tuple(((gt_crop[a]*64).astype(int))), 2,(255-no*60,no*60,0),9)
# # # cv2.imwrite("asda.jpg", img)
cv2.imwrite(output_dir + "/" +str(angle)+str(random_crop)+ image, img_crop)
spamwriter_1.writerow((str(angle)+str(random_crop)+ image, tuple(list(gt_crop))))
| apache-2.0 | 4,897,393,317,941,922,000 | 38.820896 | 117 | 0.463268 | false |
vsemionov/npamp | npamp/output.py | 1 | 11750 |
# Copyright (C) 2012 Victor Semionov
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import traceback
import numpy as np
import params
import plot
div_line = "=" * 32
status_writing = "generating output"
output_dir = None
models_rel_path = "pumping"
ref_pulse_rel_path = "ref_pulse"
optimization_rel_path = "optimization"
opt_pump_rel_path = os.path.join(optimization_rel_path, "pumping")
opt_geom_rel_path = os.path.join(optimization_rel_path, "geometry")
alt_plot_rel_path = "alt"
x_label = "x [mm]"
y_label = "y [mm]"
z_label = "z [mm]"
rho_label = "r [mm]"
t_amp_label = "t [ns]"
i_label = "pulse num."
norm_t_label = "t/T"
density_rel_label = "rel. photon density"
density_norm_rel_label = "norm. photon density"
upper_rel_label = "rel. upper state population"
lower_rel_label = "rel. lower state population"
inversion_rel_label = "rel. population inversion"
inversion_abs_label = "population inversion [cm^-3]"
t_pump_label = "t [us]"
pump_duration_label = "pump duration [us]"
pump_power_label = "pump power [W]"
eff_power_density_label = "absorbed power density [W/cm^3]"
rate_label = "depopulation rate [cm^-3 s^-1]"
rate_rel_label = "depop. rate / inversion [s^-1]"
gain_label = "small-signal gain"
error_label = "rel. error"
inversion_rdiff_label = "inversion rel. difference [%]"
gain_rdiff_label = "gain rel. difference [%]"
energy_rel_label = "energy gain"
energy_abs_pump_label = "optical pump energy [J]"
energy_abs_stored_label = "stored energy [J]"
energy_abs_pulse_label = "output energy [mJ]"
rel_gain_decrease_label = "rel. gain decrease [%]"
fluence_rel_label = "rel. fluence"
fluence_norm_rel_label = "norm. fluence"
fluence_abs_label_energy = "max. output fluence [J/cm^2]"
medium_radius_label = "medium diameter [mm]"
beam_radius_label = "beam diameter [mm]"
extraction_eff_label = "extraction efficiency [%]"
total_eff_label = "optical to optical efficiency [%]"
lower_lifetime_legend = r"$\tau_1 \, = \, %s$"
lower_lifetime_unit = "ns"
def warn(message):
print >>sys.stderr, "%s: %s" % ("warning:", message)
def print_error(message, hint=None):
print >>sys.stderr, "%s: %s" % ("error", message)
if hint:
print >>sys.stderr, hint
def print_exception():
t, v, _ = sys.exc_info()
fmt = traceback.format_exception_only(t, v)
exc_msg = fmt[-1][:-1]
print >>sys.stderr, exc_msg
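# Progress-reporting helper: (i, j) are the current loop indices, (si, sj) the
# corresponding output strides, and done forces the final status to be printed.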
def show_status((i, j), (si, sj), done):
def print_status():
if j is not None:
print "%d, %d" % (i, j)
else:
print i
if si != 0:
if done:
print_status()
else:
if i % si == 0:
if j is None:
print_status()
else:
if sj == 0:
if j == 0:
print_status()
else:
if j % sj == 0:
print_status()
def init_dir(name):
dirname = os.path.join(output_dir, name)
if not os.path.isdir(dirname):
os.makedirs(dirname)
return dirname
def plot_inversion(dirname, inv):
filename = lambda name: os.path.join(dirname, name)
T = inv.T
inversion = inv.inversion
tlim = (T[0], T[-1])
plot.plot_data(filename("inversion_evo"), "Population Inversion Evolution", (T, None, tlim, t_pump_label), (inversion, None, None, inversion_abs_label))
def plot_output(dirname, input_beam, input_pulse, fwhm, amp, fluences, exact_density_out=None, exact_population_final=None):
filename = lambda name: os.path.join(dirname, name)
density = amp.density
population = amp.population
upper = population[0]
lower = population[1]
inversion = upper - lower
Z = amp.Z
T = amp.T
if params.output_rel_time:
T = T / fwhm
TZ, ZT = np.meshgrid(T, Z)
zlim = (Z[0], Z[-1])
tlim = (T[0], T[-1])
ref_density = input_pulse.ref_density
ref_inversion = amp.active_medium.initial_inversion.ref_inversion
out_t_label = norm_t_label if params.output_rel_time else t_amp_label
stride_z = max(len(amp.Z) // params.out_count_z, 1)
stride_t = max(len(amp.T) // params.out_count_t, 1)
plot.plot_data(filename("density_in"), "Input Photon Density", (T, None, tlim, out_t_label), (density[0]/ref_density, None, None, density_rel_label))
plot.plot_data(filename("density_out"), "Output Photon Density", (T, None, tlim, out_t_label), (density[-1]/ref_density, None, None, density_rel_label))
plot.plot_data(filename("densities"), "Input and Output Photon Density", ((T, ) * 2, None, tlim, out_t_label), ((density[0]/ref_density, density[-1]/ref_density), None, None, density_rel_label), ("input pulse", "output pulse"))
plot.plot_data(filename("densities_norm"), "Normalized Input and Output Photon Density", ((T, ) * 2, None, tlim, out_t_label), ((density[0]/ref_density, density[-1]/np.amax(density[-1])), None, None, density_norm_rel_label), ("input pulse", "output pulse"))
plot.plot_data(filename("upper_init"), "Initial Upper State Population", (Z, None, zlim, z_label), (upper.T[0]/ref_inversion, None, None, upper_rel_label))
plot.plot_data(filename("upper_final"), "Final Upper State Population", (Z, None, zlim, z_label), (upper.T[-1]/ref_inversion, None, None, upper_rel_label))
plot.plot_data(filename("lower_init"), "Initial Lower State Population", (Z, None, zlim, z_label), (lower.T[0]/ref_inversion, None, None, lower_rel_label))
plot.plot_data(filename("lower_final"), "Final Lower State Population", (Z, None, zlim, z_label), (lower.T[-1]/ref_inversion, None, None, lower_rel_label))
plot.plot_data(filename("inversion_init"), "Initial Population Inversion", (Z, None, zlim, z_label), (inversion.T[0]/ref_inversion, None, None, inversion_rel_label))
plot.plot_data(filename("inversion_final"), "Final Population Inversion", (Z, None, zlim, z_label), (inversion.T[-1]/ref_inversion, None, None, inversion_rel_label))
plot.plot_projection(filename("density_evo"), "Photon Density Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (density/ref_density, None, density_rel_label), (30, -30), (stride_z, stride_t))
plot.plot_projection(filename("upper_evo"), "Upper State Population Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (upper/ref_inversion, None, upper_rel_label), (30, 30), (stride_z, stride_t))
plot.plot_projection(filename("lower_evo"), "Lower State Population Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (lower/ref_inversion, None, lower_rel_label), (30, 30), (stride_z, stride_t))
plot.plot_projection(filename("inversion_evo"), "Population Inversion Evolution", (ZT, None, z_label), (TZ, None, out_t_label), (inversion/ref_inversion, None, inversion_rel_label), (30, 30), (stride_z, stride_t))
if exact_density_out is not None:
plot.plot_error(filename("density_err"), "Photon Density Relative Error", (T, None, tlim, out_t_label), ((exact_density_out, density[-1]), None, None, error_label))
if exact_population_final is not None:
plot.plot_error(filename("inversion_err"), "Population Inversion Relative Error", (Z, None, zlim, z_label), ((exact_population_final[0] - exact_population_final[1], inversion.T[-1]), None, None, error_label))
if amp.active_medium.doping_agent.lower_lifetime != 0.0:
plot.plot_error(filename("upper_err"), "Upper State Population Relative Error", (Z, None, zlim, z_label), ((exact_population_final[0], upper.T[-1]), None, None, error_label))
plot.plot_error(filename("lower_err"), "Lower State Population Relative Error", (Z, None, zlim, z_label), ((exact_population_final[1], lower.T[-1]), None, None, error_label))
norm_fluences = fluences / input_beam.ref_fluence
plot.plot_data(filename("fluence"), "Fluence Evolution", (Z, None, zlim, z_label), (norm_fluences, None, None, fluence_rel_label))
def plot_train(dirname, input_beam, active_medium, output_photon_counts):
filename = lambda name: os.path.join(dirname, name)
pulse_count = len(output_photon_counts)
pulse_nums = np.arange(1, pulse_count + 1)
nlim = (pulse_nums[0] - 1, pulse_nums[-1] + 1)
extra_args = dict(style="o", vlines=True, grid="y") if pulse_count <= 32 else {}
input_photon_count = input_beam.fluence_integral(active_medium.radius)
plot.plot_data(filename("pulse_energy_gain"), "Pulse Energy Gain", (pulse_nums, None, nlim, i_label), (output_photon_counts/input_photon_count, None, None, energy_rel_label), **extra_args)
def plot_beam(dirname, input_beam, Rho, Phi, ref_output_fluence):
filename = lambda name: os.path.join(dirname, name)
if len(Rho) > 1:
vfluence = np.vectorize(input_beam.fluence)
ref_input_fluence = vfluence(*np.meshgrid(Rho, Phi)).T
norm_input_fluence = ref_input_fluence / input_beam.ref_fluence
norm_output_fluence = ref_output_fluence / input_beam.ref_fluence
max_output_fluence = np.amax(norm_output_fluence)
n_ref = -1
for n, phi in enumerate(Phi):
if n_ref < 0 or abs(phi - input_beam.phi_ref) < abs(Phi[n_ref] - input_beam.phi_ref):
n_ref = n
rholim = (Rho[0], Rho[-1])
plot.plot_data(filename("fluences"), "Input and Output Fluence", ((Rho,)*2, None, rholim, rho_label), ((norm_input_fluence[:, n_ref], norm_output_fluence[:, n_ref]), None, None, fluence_rel_label), ("input beam", "output beam"))
plot.plot_data(filename("fluences_norm"), "Normalized Input and Output Fluence", ((Rho,)*2, None, rholim, rho_label), ((norm_input_fluence[:, n_ref], norm_output_fluence[:, n_ref] / max_output_fluence), None, None, fluence_norm_rel_label), ("input beam", "output beam"))
if len(Phi) > 1:
FR, RF = np.meshgrid(Phi, Rho)
XY, YX = RF * np.cos(FR), RF * np.sin(FR)
stride_rho = max(len(Rho) // params.out_count_rho, 1)
stride_phi = max(len(Phi) // params.out_count_phi, 1)
plot.plot_projection(filename("fluence_in"), "Input Fluence", (XY, None, x_label), (YX, None, y_label), (norm_input_fluence, None, fluence_rel_label), (30, -60), (stride_rho, stride_phi))
plot.plot_projection(filename("fluence_out"), "Output Fluence", (XY, None, x_label), (YX, None, y_label), (norm_output_fluence, None, fluence_rel_label), (30, -60), (stride_rho, stride_phi))
| bsd-2-clause | -5,244,082,922,388,500,000 | 49.646552 | 278 | 0.655234 | false |
living180/vex | vex/remove.py | 1 | 1329 | import os
import shutil
from vex import exceptions
def obviously_not_a_virtualenv(path):
include = os.path.join(path, 'include')
bin = os.path.join(path, 'bin')
scripts = os.path.join(path, 'Scripts')
if not os.path.exists(bin) and not os.path.exists(scripts):
return True
if os.path.exists(include) and not any(
filename.startswith('py') for filename in os.listdir(include)
):
return True
return False
def handle_remove(ve_path):
if not os.path.exists(ve_path):
return
if hasattr(os, "geteuid"):
if os.geteuid() == 0 or os.environ.get('USER', '') == 'root':
raise exceptions.VirtualenvNotRemoved(
"not removing any directory as root user")
if ve_path in ("/", "\\"):
raise exceptions.VirtualenvNotRemoved(
"not removing possible root directory {0!r}".format(ve_path))
if ve_path == os.path.expanduser("~"):
raise exceptions.VirtualenvNotRemoved(
"not removing possible home directory {0!r}".format(ve_path))
# last-minute checks
if obviously_not_a_virtualenv(ve_path):
raise exceptions.VirtualenvNotRemoved(
"path {0!r} did not look like a virtualenv".format(ve_path))
print("Removing {0!r}".format(ve_path))
shutil.rmtree(ve_path)
| mit | -4,692,091,804,238,745,000 | 34.918919 | 73 | 0.632054 | false |
cginternals/glkernel | scripts/generate.py | 1 | 19818 |
import posixpath # instead of os.path, to always use forward slashes
import os
import re
# TODOs:
# (more TODOs in code)
standardTypes = {
"bool",
"char",
"short",
"int",
"long",
"long long",
"unsigned char",
"unsigned short",
"unsigned int",
"unsigned long",
"unsigned long long",
"float",
"double",
"long double",
"size_t",
"glm::uint16"
}
# ------------
# large-scale parsing
def findPairedBrace(code):
nl = 1
for i,c in enumerate(code):
if c == '}': nl -= 1
if c == '{': nl += 1
if nl == 0:
return i
def getNamespaces(code):
namespaces = dict()
global namespaceBeginPattern
namespaceBeginPattern = re.compile(r"^namespace(?:\s+(?P<name>\w+))?\s*\{", re.M | re.S)
lastEnd = 0
for match in namespaceBeginPattern.finditer(code):
# skip inner namespaces
if match.start() < lastEnd:
continue
nsStart = match.end() # behind opening brace
nsEnd = findPairedBrace(code[nsStart:]) + nsStart # index of closing brace
subNamespaces = getNamespaces(code[nsStart:nsEnd])
namespaces[(nsStart,nsEnd)] = (match.group("name") or "<unnamed>", subNamespaces)
# remember end for skipping inner namespaces
lastEnd = nsEnd
return namespaces
def namespaceAtPosition(namespaces, pos):
for span in namespaces:
if pos in range(*span):
innerNS = namespaceAtPosition(namespaces[span][1], pos - span[0])
return namespaces[span][0] + ("::" + innerNS if innerNS else "")
return ""
# ------------
# small-scale parsing
def removeCVRef(typeString):
return re.sub(r'^(?:const |volatile )*(.*?)(?:\s*&)?$', r'\1', typeString)
def splitParams(paramString):
splitParams = [p.strip() for p in paramString.split(',') if p.strip()]
i = 0
while i < len(splitParams)-1:
if splitParams[i].count('<') != splitParams[i].count('>'):
splitParams[i:i+2] = [splitParams[i] + ", " + splitParams[i+1]]
else:
i += 1
paramDefaults = [(split[0].strip(), split[1].strip() if len(split) > 1 else '') for split in [p.rsplit('=', 1) for p in splitParams]]
paramsSplit = [(l.strip(), r.strip(), d) for l,r,d in [p.rsplit(' ', 1) + [d] for p,d in paramDefaults]]
return paramsSplit
def removeParamDefaults(params):
return [(p[0], p[1]) for p in params]
def getParamNames(params):
return [p[1] for p in params]
def getParamTypes(params):
return [p[0] for p in params]
def getParamDefaults(params):
return [(p[1], p[2]) for p in params if p[2]]
def possibleTypes(argType, templateList):
    if re.match(r"^\w+$", argType): # argType is just single word, e.g. 'T'
if "std::enable_if<std::is_floating_point<"+argType+">::value>::type" in templateList:
return {"float"}
else:
return {"float", "vec2", "vec3", "vec4"}
    genVecMatch = re.match(r"(\w+)\s*<\s*\w+\s*,\s*\w+\s*>", argType) # general glm vector, e.g. 'V<T, P>'
if genVecMatch:
        if re.search(r"template\s*<\s*(?:typename|class)\s*,\s*glm::precision\s*>\s*(?:typename|class)\s*" + genVecMatch.group(1), templateList):
return {"vec2", "vec3", "vec4"}
    specVecMatch = re.match(r"glm::tvec(\d)<.*?>", argType) # specific glm vector, e.g. 'glm::tvec4<T, P>'
if specVecMatch:
return {"vec"+specVecMatch.group(1)}
return {argType}
def paramTypeFromKernelTypes(kernelTypeString, paramTypeString, templateList, enums):
if possibleTypes(paramTypeString, templateList) == {'float'}:
return "float"
strippedTypeString = removeCVRef(paramTypeString)
if kernelTypeString == strippedTypeString: # e.g. 'V<T, P>' and 'const V<T, P>&'
return "same"
if strippedTypeString in kernelTypeString: # e.g. 'const T&' and 'V<T, P>'
return "float"
if strippedTypeString in [e["name"] for e in enums]:
return strippedTypeString
if strippedTypeString in standardTypes:
return strippedTypeString
print("Unknown Type encountered: " + paramTypeString)
def getEnumValues(valueDefString):
definitions = [d.strip() for d in valueDefString.split(',')]
values = []
i = 0
for d in definitions:
if '=' in d:
_, _, expr = d.partition('=')
i = eval(expr, dict(values))
values.append((d,i))
i += 1
return values
# ------------
# generation
def enumForJS(value, enums):
if "::" not in value:
return value
enumDict = {enum["name"]: {valueName:value for valueName, value in enum["values"]} for enum in enums}
enumName, _, valueName = value.partition("::")
if enumName not in enumDict:
# TODO: Warning?
return value
if valueName not in enumDict[enumName]:
# TODO: Warning?
return value
return enumName + "." + valueName
def jsFuncName(func):
name = func["name"]
if "alternativeNumber" in func:
name += str(func["alternativeNumber"])
return "_".join(func["namespace"].split('::')[1:] + [name])
def jsFunction(func, enums):
assert func["namespace"].startswith("glkernel::"), "function \""+func["name"]+"\" from outside glkernel namespace: " + func["namespace"]
namespaceStack = func["namespace"].split("::")
namespaceStack.pop(0) # ignore outmost namespace glkernel
defaultChecks = '\n'.join([" {name} = (typeof {name} !== 'undefined') ? {name} : {default};".format(name=name, default=enumForJS(default, enums)) for name, default in getParamDefaults(func["params"])])
if defaultChecks:
defaultChecks = "\n // Defaults\n" + defaultChecks + "\n"
paramString = ', '.join(getParamNames(func["params"]))
paramStringKomma = "" if not paramString else ', ' + paramString
firstLine = " {name}: function({params}) {{".format(name = func["name"], params = paramString)
finalCall = " _glkernel.{generatedName}(that.kernel{paramsWithKomma});".format(generatedName = jsFuncName(func), paramsWithKomma = paramStringKomma)
jsCode = """{firstLine}{defaultChecks}
{finalCall}
return that;
}}""".format(firstLine = firstLine, defaultChecks = defaultChecks, finalCall = finalCall)
return jsCode
def buildJSNamespaces(funcs, enums):
namespaces = dict()
for func in funcs:
if func["namespace"] not in namespaces:
namespaces[func["namespace"]] = []
namespaces[func["namespace"]].append(jsFunction(func, enums))
nsCodes = []
for ns, codes in sorted(namespaces.items()):
name = ns[len("glkernel::"):]
functionsCode = ",\n".join(codes)
nsCode = " this.{name} = {{\n{funcCodes}\n }};".format(name = name, funcCodes = functionsCode)
nsCodes.append(nsCode)
return "\n".join(nsCodes)
def buildJSEnums(enums):
enumCodes = []
for enum in sorted(enums, key=lambda e: e["name"]):
valueLines = []
for name, value in enum["values"]:
valueLines.append(" " + name + ": " + str(value))
valuesCode = ',\n'.join(valueLines)
enumCode = "{name} = {{\n{members}\n}};".format(name = enum["name"], members = valuesCode)
enumCodes.append(enumCode)
return "\n\n".join(enumCodes)
def buildCPPFunctionAdds(funcs):
return '\n'.join([' addFunction("{name}", this, &JSInterface::{name});'.format(name = jsFuncName(func)) for func in funcs])
def buildCPPFunctionForwardDecl(func, enums):
enumNames = [enum["name"] for enum in enums]
funcName = jsFuncName(func)
# Deduce parameter types
kernelTypes = possibleTypes(func["kernelType"], func["template"])
paramTypes = [paramTypeFromKernelTypes(func["kernelType"], param[0], func["template"], enums) for param in func["params"]]
cases = [(kernelType, [kernelType if param == "same" else param for param in paramTypes]) for kernelType in kernelTypes]
if "alternatives" in func:
for alt in func["alternatives"]:
altKernelTypes = possibleTypes(alt["kernelType"], alt["template"])
altParamTypes = [paramTypeFromKernelTypes(alt["kernelType"], param[0], alt["template"], enums) for param in alt["params"]]
cases += [(kernelType, [kernelType if param == "same" else param for param in altParamTypes]) for kernelType in altKernelTypes]
cases.sort()
typesPerParam = [{case[1][i] for case in cases} for i in range(len(cases[0][1]))]
variantNeeded = [len(types) > 1 for types in typesPerParam]
enumParam = [list(types)[0] in enumNames for types in typesPerParam]
paramTypes = ["cppexpose::Object*"] + ["const cppexpose::Variant&" if needVariant else "int" if isEnum else list(types)[0] for types, needVariant, isEnum in zip(typesPerParam, variantNeeded, enumParam)]
paramNames = ["obj"] + [param[1] for param in func["params"]]
paramList = ", ".join(type + " " + name for type,name in zip(paramTypes, paramNames))
return " void " + funcName + "(" + paramList + ");"
def buildCPPFunctionForwardDecls(funcs, enums):
return '\n'.join([buildCPPFunctionForwardDecl(func, enums) for func in funcs])
def buildCPPIncludes(fileNames):
includeFiles = []
for f in fileNames:
if not "include/" in f:
print("Error: " + f + " is outside include directory!")
continue
while not f.startswith("include/"):
f = f[1:]
f = f[len("include/"):]
includeFiles.append(f)
return '\n'.join(['#include <' + name + '>' for name in includeFiles])
def buildCPPImplementation(func, enums):
enumNames = [enum["name"] for enum in enums]
funcName = jsFuncName(func)
# Deduce parameter types
kernelTypes = possibleTypes(func["kernelType"], func["template"])
paramTypes = [paramTypeFromKernelTypes(func["kernelType"], param[0], func["template"], enums) for param in func["params"]]
cases = [(kernelType, [kernelType if param == "same" else param for param in paramTypes]) for kernelType in kernelTypes]
if "alternatives" in func:
for alt in func["alternatives"]:
altKernelTypes = possibleTypes(alt["kernelType"], alt["template"])
altParamTypes = [paramTypeFromKernelTypes(alt["kernelType"], param[0], alt["template"], enums) for param in alt["params"]]
cases += [(kernelType, [kernelType if param == "same" else param for param in altParamTypes]) for kernelType in altKernelTypes]
cases.sort()
typesPerParam = [{case[1][i] for case in cases} for i in range(len(cases[0][1]))]
variantNeeded = [len(types) > 1 for types in typesPerParam]
enumParam = [list(types)[0] in enumNames for types in typesPerParam]
paramTypes = ["cppexpose::Object*"] + ["const cppexpose::Variant&" if needVariant else "int" if isEnum else list(types)[0] for types, needVariant, isEnum in zip(typesPerParam, variantNeeded, enumParam)]
paramNames = ["obj"] + [param[1] for param in func["params"]]
paramList = ", ".join(type + " " + name for type,name in zip(paramTypes, paramNames))
# Parameters with only one possible type may be handled before branching into kernel types
earlyConv = []
for param, enumType in [(name, list(types)[0]) for name, types, isEnum in zip(paramNames[1:], typesPerParam, enumParam) if isEnum]:
enum = [e for e in enums if e["name"] == enumType][0]
earlyConv.append(" const auto {name}_enum = static_cast<{namespace}::{type}>({name});".format(name=param, type=enum["name"], namespace = enum["namespace"]))
earlyConversions = '\n'.join(earlyConv)
if earlyConversions:
earlyConversions += '\n\n'
# Split cases by kernel type
casesByKernelType = dict()
for kernel, params in cases:
if kernel not in casesByKernelType:
casesByKernelType[kernel] = []
casesByKernelType[kernel].append(params)
# Build code for different kernel types
kernelCases = []
for kernelType, cases in sorted(casesByKernelType.items()):
kernelDim = 1 if kernelType == "float" else int(kernelType[-1])
firstLine = " if (auto kernelObj = dynamic_cast<Kernel" + str(kernelDim) + "Object*>(obj))"
neededVariantChecks = False
# Build code for specific parameter type constellations
paramCases = []
for case in cases:
# Check if variants contain acceptable values
variantChecks = []
for name, type, needsVariant in zip(paramNames[1:], case, variantNeeded):
if not needsVariant:
continue
checkFunction = "canBe" + type[0].upper() + type[1:]
variantChecks.append(checkFunction + "(" + name + ")")
neededVariantChecks = True
# Unpack variants to usable values
variantUnpackers = []
for name, type, needsVariant in zip(paramNames[1:], case, variantNeeded):
if not needsVariant:
continue
convFunction = "variantTo" + type[0].upper() + type[1:]
variantUnpackers.append(" const auto {name}_conv = {func}({name});".format(name = name, func = convFunction))
variantUnpackingCode = '\n'.join(variantUnpackers)
if variantUnpackingCode:
variantUnpackingCode += '\n\n'
finalCallParams = ["kernelObj->kernel()"] + [name + ("_enum" if isEnum else "_conv" if needsVariant else "") for name, isEnum, needsVariant in zip(paramNames[1:], enumParam, variantNeeded)]
finalCallParamString = ', '.join(finalCallParams)
finalCallString = " {namespace}::{name}({params});".format(namespace = func["namespace"], name = func["name"], params = finalCallParamString)
innerCode = "{variants}{finalCall}\n return;".format(variants = variantUnpackingCode, finalCall = finalCallString)
caseCode = innerCode
if variantChecks:
variantCheckCode = ' && '.join(variantChecks)
indentedInnerCode = '\n'.join([(" " + line).rstrip() for line in innerCode.split('\n')])
caseCode = " if ({varChecks})\n {{\n{innerCode}\n }}".format(varChecks = variantCheckCode, innerCode = indentedInnerCode)
paramCases.append(caseCode)
if neededVariantChecks:
paramCases.append(" cppassist::error(\"glkernel-JSInterface\") << \"Invalid parameters for " + funcName + "\";\n return;")
paramCasesCode = '\n\n'.join(paramCases)
kernelCaseCode = "{firstLine}\n {{\n{cases}\n }}".format(firstLine = firstLine, cases = paramCasesCode)
kernelCases.append(kernelCaseCode)
kernelCasesCode = '\n\n'.join(kernelCases)
fullCode = """void JSInterface::{funcName}({paramList})
{{
{earlyConv}{cases}
cppassist::error("glkernel-JSInterface") << "Invalid kernel object for {funcName}";
}}""".format(funcName = funcName, paramList = paramList, earlyConv = earlyConversions, cases = kernelCasesCode)
return fullCode
def buildCPPImplementations(funcs, enums):
return '\n\n\n'.join([buildCPPImplementation(func, enums) for func in funcs])
# ------------
# misc
def dedupeFuncs(funcs):
i = 1
while i < len(funcs):
currentFunc = funcs[i]
for otherFunc in funcs[:i]:
if otherFunc["namespace"] != currentFunc["namespace"]:
continue
if otherFunc["name"] != currentFunc["name"]:
continue
if getParamNames(otherFunc["params"]) == getParamNames(currentFunc["params"]):
# identical in JS -> can be safely removed
funcs.remove(currentFunc)
i -= 1
if "alternatives" not in otherFunc:
otherFunc["alternatives"] = []
otherFunc["alternatives"].append(currentFunc)
break
if "renamedAlternatives" not in otherFunc:
otherFunc["renamedAlternatives"] = 0
otherFunc["renamedAlternatives"] += 1
currentFunc["alternativeNumber"] = otherFunc["renamedAlternatives"]
break
i += 1
# ------------
# main
def main(args):
glkernelIncludeDir = "../source/glkernel/include/glkernel"
sourceFiles = [posixpath.join(glkernelIncludeDir, p) for p in os.listdir(glkernelIncludeDir) if p not in ["Kernel.h", "glm_compatability.h"] and p.endswith(".h")]
funcPattern = re.compile(r"^template\s*<(?P<template>.*?)>$\s*^(?P<return>\w+)\s(?P<name>\w+)\(\s*tkernel<(?P<kernelType>.*?)>\s*&\s*\w+\s*(?P<params>(?:,.*?)*)\);$", re.M | re.S)
enumPattern = re.compile(r"^enum(?:\s+class)?\s+(?P<name>\w+)\s*(?::.*?\s*)?\{(?P<content>.*?)\};$", re.M | re.S)
allFunctions = []
allEnums = []
for f in sourceFiles:
content = ''
with open(f,'r') as file:
content = file.read()
namespaces = getNamespaces(content)
functionMatches = [m for m in funcPattern.finditer(content)]
functions = [{
"name": f.group("name"),
"kernelType": f.group("kernelType"),
"namespace": namespaceAtPosition(namespaces, f.start()),
"params": splitParams(f.group("params")),
"return": f.group("return"),
"template": f.group("template")
} for f in functionMatches]
enumMatches = [m for m in enumPattern.finditer(content)]
enums = [{
"name": e.group("name"),
"values": getEnumValues(e.group("content")),
"namespace": namespaceAtPosition(namespaces, e.start())
} for e in enumMatches]
allFunctions.extend(functions)
allEnums.extend(enums)
dedupeFuncs(allFunctions)
funcsJSCode = buildJSNamespaces(allFunctions, allEnums)
enumJSCode = buildJSEnums(allEnums)
templateDir = args.inDir
cppDestDir = args.cppDir
jsDestDir = args.jsDir
with open(templateDir + "/glkernel.js.template", "r") as templateFile:
with open(jsDestDir + "/glkernel.js", "w") as outFile:
outFile.write(templateFile.read().format(enums=enumJSCode, functions=funcsJSCode))
forwardDecls = buildCPPFunctionForwardDecls(allFunctions, allEnums)
with open(templateDir + "/JSInterface.h.template", "r") as templateFile:
with open(cppDestDir + "/JSInterface.h", "w") as outFile:
outFile.write(templateFile.read().format(functionForwardDecls=forwardDecls))
includes = buildCPPIncludes(sourceFiles)
funcAdds = buildCPPFunctionAdds(allFunctions)
funcImpl = buildCPPImplementations(allFunctions, allEnums)
with open(templateDir + "/JSInterface.cpp.template", "r") as templateFile:
with open(cppDestDir + "/JSInterface.cpp", "w") as outFile:
outFile.write(templateFile.read().format(includes=includes, addFunctionCalls=funcAdds, generatedFunctions=funcImpl))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--templates", "-t", metavar="<dir>", type=str, default=".", dest="inDir", help="directory containing template files")
parser.add_argument("--cpp-dest" , "-c", metavar="<dir>", type=str, default=".", dest="cppDir", help="directory where result .h and .cpp files are written to")
parser.add_argument("--js-dest" , "-j", metavar="<dir>", type=str, default=".", dest="jsDir", help="directory where result .js files are written to")
args = parser.parse_args()
main(args)
| mit | 3,420,205,758,163,455,500 | 37.481553 | 220 | 0.612171 | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-monitor/azure/mgmt/monitor/models/rule_action.py | 1 | 1307 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RuleAction(Model):
"""The action that is performed when the alert rule becomes active, and when
an alert condition is resolved.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RuleEmailAction, RuleWebhookAction
:param odatatype: Constant filled by server.
:type odatatype: str
"""
_validation = {
'odatatype': {'required': True},
}
_attribute_map = {
'odatatype': {'key': 'odata\\.type', 'type': 'str'},
}
_subtype_map = {
'odatatype': {'Microsoft.Azure.Management.Insights.Models.RuleEmailAction': 'RuleEmailAction', 'Microsoft.Azure.Management.Insights.Models.RuleWebhookAction': 'RuleWebhookAction'}
}
def __init__(self):
self.odatatype = None
| mit | -2,687,682,908,228,144,000 | 32.512821 | 187 | 0.613619 | false |
UManPychron/pychron | pychron/experiment/experimentor.py | 1 | 11553 | # ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import Instance, List, on_trait_change, Bool, Event
from pychron.dvc.dvc_irradiationable import DVCIrradiationable
from pychron.experiment.experiment_executor import ExperimentExecutor
from pychron.experiment.factory import ExperimentFactory
from pychron.experiment.queue.experiment_queue import ExperimentQueue
class Experimentor(DVCIrradiationable):
experiment_factory = Instance(ExperimentFactory)
experiment_queue = Instance(ExperimentQueue)
executor = Instance(ExperimentExecutor)
experiment_queues = List
# stats = Instance(StatsGroup, ())
mode = None
# unique_executor_db = False
save_enabled = Bool
# ===========================================================================
# permissions
# ===========================================================================
# max_allowable_runs = 10000
# can_edit_scripts = True
# _last_ver_time = None
# _ver_timeout = 10
# ===========================================================================
# task events
# ===========================================================================
activate_editor_event = Event
save_event = Event
def prepare_destory(self):
if self.executor:
if self.executor.datahub:
self.executor.datahub.prepare_destroy()
if self.experiment_factory:
if self.experiment_factory.run_factory:
if self.experiment_factory.run_factory.datahub:
self.experiment_factory.run_factory.datahub.prepare_destroy()
def load(self):
self.experiment_factory.queue_factory.db_refresh_needed = True
self.experiment_factory.run_factory.db_refresh_needed = True
return True
def reset_run_generator(self):
if self.executor.is_alive():
self.debug('Queue modified. Reset run generator')
# self.executor.queue_modified = True
self.executor.set_queue_modified()
def refresh_executable(self, qs=None):
if qs is None:
qs = self.experiment_queues
if self.executor.is_alive():
qs = (self.executor.experiment_queue,)
self.executor.executable = all([ei.is_executable() for ei in qs])
self.debug('setting executable {}'.format(self.executor.executable))
def update_queues(self):
self._update_queues()
def update_info(self):
try:
self._update()
except BaseException as e:
self.debug_exception()
self.warning_dialog('Failed updating info: Error={}'.format(e))
# ===============================================================================
# info update
# ===============================================================================
def _get_all_automated_runs(self, qs=None):
if qs is None:
qs = self.experiment_queues
return [ai for ei in qs
for ai in ei.automated_runs
if ai.executable]
def _update(self, queues=None):
self.debug('update runs')
if queues is None:
queues = self.experiment_queues
queues = [qi for qi in queues if qi.is_updateable()]
if not queues:
return
self.debug('executor executable {}'.format(self.executor.executable))
self.debug('updating stats, ')
self.executor.stats.calculate()
self.refresh_executable(queues)
self._set_analysis_metadata()
self.debug('info updated')
for qi in queues:
qi.refresh_table_needed = True
def _set_analysis_metadata(self):
cache = dict()
db = self.get_database()
aruns = self._get_all_automated_runs()
with db.session_ctx():
for ai in aruns:
if ai.skip:
continue
ln = ai.labnumber
if ln == 'dg':
continue
# is run in cache
if ln not in cache:
info = db.get_identifier_info(ln)
self.debug('Info for {}={}'.format(ln, info))
if not info:
cache[ln] = dict(identifier_error=True)
else:
info['identifier_error'] = False
cache[ln] = info
ai.trait_set(**cache[ln])
def execute_queues(self, queues):
names = ','.join([e.name for e in queues])
self.debug('queues: n={}, names={}'.format(len(queues), names))
self.executor.trait_set(experiment_queues=queues, experiment_queue=queues[0])
return self.executor.execute()
def verify_database_connection(self, inform=True):
db = self.get_database()
if db is not None:
if db.connect(force=True):
return True
elif inform:
self.warning_dialog('No Database available')
def sync_queue(self, queue):
ms = queue.mass_spectrometer
ed = queue.extract_device
db = self.get_database()
with db.session_ctx():
next_pos = None
for i, ai in enumerate(queue.automated_runs):
if ai.skip or ai.is_special():
continue
kw = {'identifier': ai.identifier, 'position': ai.position,
'mass_spectrometer': ms.lower(),
'extract_device': ed}
if ai.is_step_heat():
kw['aliquot'] = ai.aliquot
kw['extract_value'] = ai.extract_value
self.debug('checking {}/{}. attr={}'.format(i, ai.runid, kw))
aa = db.get_analysis_by_attr(**kw)
if aa is None:
self.debug('----- not found')
if next_pos == ai:
i -= 1
break
elif not self.confirmation_dialog('Found analyses up to {}. '
'position={}, extract={}. '
'Continue searching?'.format(ai.runid, ai.extract_value,
ai.position)):
break
next_pos = queue.automated_runs[i + 1]
if i:
if i == len(queue.automated_runs) - 1:
self.information_dialog('All Analyses from this experiment have been run')
else:
queue.automated_runs = queue.automated_runs[i:]
else:
self.information_dialog('No Analyses from this experiment have been run')
# ===============================================================================
# handlers
# ===============================================================================
def _experiment_queue_changed(self, eq):
if eq:
self.experiment_factory.queue = eq
self.experiment_factory.sync_queue_meta()
self.experiment_factory.edit_enabled = True
else:
self.experiment_factory.edit_enabled = False
@on_trait_change('executor:experiment_queue')
def _activate_editor(self, eq):
self.activate_editor_event = id(eq)
@on_trait_change('experiment_queues[]')
def _update_queues(self):
qs = self.experiment_queues
self.executor.stats.experiment_queues = qs
@on_trait_change('experiment_factory:run_factory:changed')
def _queue_dirty(self):
self.experiment_queue.changed = True
@on_trait_change('experiment_queue:dclicked')
def _dclicked_changed(self, new):
self.experiment_factory.run_factory.edit_mode = True
self._set_factory_runs(self.experiment_queue.selected)
@on_trait_change('experiment_factory:run_factory:update_info_needed')
def _refresh3(self):
self.debug('update info needed fired')
self.update_info()
@on_trait_change('executor:queue_modified')
def _refresh5(self, new):
if new:
self.debug('queue modified fired')
self.update_info()
@on_trait_change('experiment_factory:run_factory:refresh_table_needed')
def _refresh4(self):
for qi in self.experiment_queues:
qi.refresh_table_needed = True
@on_trait_change('experiment_factory:save_button')
def _save_update(self):
self.save_event = True
self.update_info()
@on_trait_change('experiment_queue:refresh_info_needed')
def _handle_refresh(self):
self.update_info()
@on_trait_change('experiment_queue:selected')
def _selected_changed(self, new):
ef = self.experiment_factory
rf = ef.run_factory
rf.edit_mode = False
if new:
self._set_factory_runs(new)
# if self.executor.is_alive():
a = new[-1]
if not a.skip:
self.executor.stats.calculate_at(a, at_times=self.executor.is_alive())
# self.stats.calculate()
@on_trait_change('experiment_factory:queue_factory:delay_between_analyses')
def handle_delay_between_analyses(self, new):
if self.executor.is_alive():
self.executor.experiment_queue.delay_between_analyses = new
def _set_factory_runs(self, new):
ef = self.experiment_factory
rf = ef.run_factory
# print 'set runs'
# rf.special_labnumber = 'Special Labnumber'
rf.suppress_update = True
rf.set_selected_runs(new)
rf.suppress_update = False
def _executor_factory(self):
e = ExperimentExecutor(mode=self.mode,
application=self.application)
e.bind_preferences()
return e
# ===============================================================================
# defaults
# ===============================================================================
def _executor_default(self):
return self._executor_factory()
def _experiment_factory_default(self):
dms = 'Spectrometer'
if self.application:
p2 = 'pychron.spectrometer.base_spectrometer_manager.BaseSpectrometerManager'
spec = self.application.get_service(p2)
if spec:
dms = spec.name.capitalize()
e = ExperimentFactory(application=self.application,
dvc=self.dvc,
default_mass_spectrometer=dms)
return e
# ============= EOF =============================================
| apache-2.0 | -4,886,870,572,820,441,000 | 34.990654 | 110 | 0.521769 | false |
Chaffelson/whoville | whoville/cloudbreak/models/blueprint_input.py | 1 | 4561 | # coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BlueprintInput(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'property_value': 'str'
}
attribute_map = {
'name': 'name',
'property_value': 'propertyValue'
}
def __init__(self, name=None, property_value=None):
"""
BlueprintInput - a model defined in Swagger
"""
self._name = None
self._property_value = None
if name is not None:
self.name = name
if property_value is not None:
self.property_value = property_value
@property
def name(self):
"""
Gets the name of this BlueprintInput.
:return: The name of this BlueprintInput.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this BlueprintInput.
:param name: The name of this BlueprintInput.
:type: str
"""
self._name = name
@property
def property_value(self):
"""
Gets the property_value of this BlueprintInput.
:return: The property_value of this BlueprintInput.
:rtype: str
"""
return self._property_value
@property_value.setter
def property_value(self, property_value):
"""
Sets the property_value of this BlueprintInput.
:param property_value: The property_value of this BlueprintInput.
:type: str
"""
self._property_value = property_value
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BlueprintInput):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | -7,454,445,487,106,906,000 | 29.610738 | 984 | 0.585617 | false |
williamoverall/soundtomidi | soundtomidi/soundtomidi.py | 1 | 48543 | """Audio Processor. Takes live audio and generates MIDI information from it.
Here are the configuration options, and they go on and on. Some of
them are optimistic that I'll come back later and add other options, but
don't count on it. Probably the most important thing to wrap your head
around is that you set a frame size for audio capture (say, 512), and each
audio processor uses stores some multiple of that before it does anything.
A multiple of "4" means the audio processor waits until 2048 bytes (4x512)
have arrived before doing anything. The hop size (which aubio uses as a
window for its work) is then a multiple of that. Most of the time it seems
like the hop size should be exactly half of the window size, so you'd put
.5 in that configuration option. If the window size and hop size should be
the same (which worked best for me for beat detection), enter 1 as the
configuration option.
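For example (illustrative numbers only): with framesize=512, tframemult=4, and
thopmult=.5, the tempo processor collects a 2048 sample window and uses a 1024
sample hop, while bframemult=1 with bhopmult=1 gives the beat processor a 512
sample window and a 512 sample hop.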
On the MIDI side, nearly everything is about what controller, note, or sysex
message should be used to output information. Presumably you'll just need
one or the other for each audio processor. It really depends on what is going
to be on the receiving end of this information how you set this up. The
defaults use some safe options--manufacturer is the MIDI "for educational use"
prefix, and the controllers are not officially mapped to anything. Notes are
only (potentially) used for pitches, but it should be clear how you could
modify the code to use notes for anything else as well.
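As a rough illustration (assuming the defaults above, and assuming the sender
adds the manufacturer prefix to each sysex message): a tempo update for
128.1 BPM would travel as the data bytes 0x7D 0x0B 0x0A 0x01 inside the usual
F0/F7 sysex framing.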
Usage:
audioprocessor.py [options]
Options:
-h --help Show this screen.
Quits after.
--listsounddevices List available sound devices.
Quits after.
--listmidiports List MIDI ports.
Quits after.
--writeinifile Write options to ini file, as specified by
                                   inifile option. If the file is already present,
                                   a backup is made of the original.
--inifile=FILE Name of options settings file.
[default: soundtomidi.ini]
--inputdevice=DEVICE ID of the sound input device. System default
audio input device will be used if not
specified.
[default: default]
--channels=CHANNELS Number of channels to capture.
[default: 1]
--samplerate=SAMPLERATE Capture rate for audio samples.
[default: 44100]
--framesize=FRAMESIZE Size of each frame captured.
[default: 512]
--stdout=STDOUT Echo message to standard out.
[default: False]
--stdoutformat=STDOUTFORMAT Format for standard out messages. Options are
"verbose", "bytes", "bin" or "hex".
[default: verbose]
--midiout=MIDIOUT Send MIDI messages?
[default: True]
--outport=MIDIOUTPORT Name of the MIDI output port. If left as
default, uses first MIDI port found.
[default: default]
--outchannel=OUTCHANNEL Number of the MIDI channel to send messages on.
Valid numbers 1-16.
[default: 14]
--sysexmanf=MANF Manufacturer prefix code for sysex messages.
                                   Int or hex values, separated by space.
[default: 0x7D]
--gettempo=TEMPO Get the tempo of the audio.
[default: True]
--talg=TALG Aubio algorithm for determining the tempo.
[default: default]
--tframemult=TFRAMEMULT Number of frames to use in calculation.
[default: 1]
--thopmult=THOPMULT Hop size, as percent of FRAMEMULT.
[default: .5]
--taverage=TAVERAGE Number of BPM values to average.
[default: 1]
--tcount=TCOUNT Number of BPM averages to be stored before
the most common one is sent as a message.
[default: 1]
--tcontrolnum=TCONTROLNUM Controller number to send BPM messages.
If "None", no control messages will be sent.
[default: 14]
--tcontroltype=TCONTROLTYPE How to encode the BPM value for control.
"minus60" sends BPM value minus 60.
EG: 60 BPM = 0 value,
120 BPM = 60 value,
187 BPM = 127 value.
[default: minus60]
--tsysexnum=TSYSEXNUM Prefix to send prior to BPM in
sysex messages.
If "None", no sysex messages will be sent.
[default: 0x0B]
--tsysextype=TSYSEXTYPE How to encode the BPM value for sysex.
"minus60" sends BPM value minus 60.
                                   (See --tcontroltype)
"twobytes" takes the BPM value to
the tenth (EG, 128.1), multiplies it
by 10 (1281), then spreads this
                                   across two 7 bit values (0x0A 0x01)
[default: twobytes]
--getbeats=BEATS Get the beats of the audio.
[default: True]
--balg=BALG Aubio algorithm to use for the beat.
[default: default]
--bframemult=BFRAMEMULT Number of frames to use in calculation.
[default: 1]
--bhopmult=BHOPMULT Hop size, as percent of FRAMEMULT.
[default: 1]
--bcontrolnum=BCONTROLNUM Controller number to send beat messages.
If "None", no control messages will be sent.
[default: 15]
--bsysexnum=BSYSEXNUM Prefix to send prior to beat number
in sysex messages.
If "None", no sysex messages will be sent.
[default: 0x1B]
--bvaltype=BVALTYPE Type of value to send with beat
controller or sysex message.
Any arbitrary number 0-127, or a
comma separated looping listing of
values to send. For example,
"0,1,2,3" will send "0" for the first
beat, "3" for fourth beat, then back to
"0" for the next one.
[default: 0,1,2,3,4,5,6,7]
--bclock=BCLOCK Send 24 clock ticks messages after each beat.
(Not yet implemented)
[default: False]
--getrms=RMS Get the RMS.
[default: True]
--rframemult=FFRAMEMULT Number of frames to use in calculation.
[default: 4]
--rhopmult=FHOPMULT Hop size, as percent of FRAMEMULT.
[default: 1]
--rcontrolnum=FRMSNUM Controller number to send RMS messages.
If "None", no frequency strength sysex messages
will be sent.
[default: 20]
--rsysexnum=FSYSEXNUM Prefix to send prior to RMS values
If "None", no RMS sysex messages will be sent.
[default: 0x1F]
--rgraceful=FRMSGRACEFUL Gracefully let go of RMS peaks.
EG: one frame peaks at 100, followed by a drop
to 20. Instead of immediately reflecting the
new value, this rule sets a cut-off for the
drop to the chosen percent. The higher the
percent, the slower the decline. New high peaks
reset this graceful fade and it starts again.
Set to 0.0 to turn off.
[default: .5]
--getfrequencies=FREQS Get the strength of filtered frequencies.
[default: True]
--falg=FALG Aubio algorithm to use for determining
the strength of the frequencies.
[default: default]
--fframemult=FFRAMEMULT Number of frames to use in calculation.
[default: 4]
--fhopmult=FHOPMULT Hop size, as percent of FRAMEMULT.
[default: 1]
--fcount=FCOUNT Number of frequency values to hold
before taking any action. Maximum value
of set will be sent.
[default: 2]
    --fbuckets=FBUCKETS            Filter bands to use for dividing up
frequencies. Comma separated list of values
plus a low and high end barrier value.
See Aubio docs "filterbanks" for more details.
Shortcuts "octave" and "third-octave" shortcut
for standard octave or 1/3 octave bands.
[default: third-octave]
--fsysexnum=FSYSEXNUM Prefix to send prior to frequency strength
values.
If "None", no sysex messages will be sent.
[default: 0x0F]
--fgraceful=FGRACEFUL Gracefully let go of frequency peaks.
EG: a frame peaks at 100, followed by
a drop to 20. Instead of immediately reflecting
the new value, this rule sets a cut-off for the
drop to the chosen percent. The higher the
percent, the slower the decline. New high peaks
reset this graceful fade and it starts again.
Set to 0.0 to turn off.
[default: .8]
--getpitch=PITCHES Get the fundamental pitch of the audio.
[default: True]
--palg=PALG Aubio algorithm to use for pitch of the audio.
[default: yin]
--pframemult=PFRAMEMULT Number of frames to use in calculation.
[default: 2]
--phopmult=PHOPMULT Hop size, as percent of FRAMEMULT.
[default: .5]
--ptolerance=PTOLERANCE Required confidence level for a pitch.
[default: 0.5]
--pcount=PCOUNT Number of pitch averages to be stored before
the most common one is sent as a message.
[default: 8]
--plowcutoff=PLOWCUTOFF Lowest pitch to consider.
[default: 0]
--phighcutoff=PHIGHCUTOFF Highest pitch to consider.
[default: 127]
--pfoldoctaves=PFOLDOCTAVES Return just 12 note values instead of the
possible 128.
[default: False]
--pnumoffset=PNUMOFFSET Used only with the above option. Shifts the "C"
value to somewhere else, and each note above
that. Middle C is "60", which is the default.
[default: 60]
--pnoteon=PNOTEON Send note on messages for the audio pitch.
[default: True]
--pnoteoff=PNOTEOFF Send note off messages when a new audio
pitch doesn't match previous pitch.
[default: True]
--pcontrolnum=PCONTROLNUM Controller number to send pitches.
If "None", no control messages will be sent.
[default: 21]
--psysexnum=PSYSEXNUM Prefix to send prior to sending note value.
If "None", no sysex messages will be sent.
[default: 0x09]
"""
from __future__ import print_function
from __future__ import division
from docopt import docopt
import configparser
import os.path
import sys
import time
import numpy as np
import sounddevice as sd
from datetime import datetime as dt
from collections import Counter
from aubio import pitch, tempo, pvoc, filterbank, fvec
import mido
import math
class Options:
"""Take configuration options as arguments or from an inifile.
Docopt is used to describe the various configuration options, and
there are many. This is handy for one off changes, but having a nice
.ini file, organized by function, makes things a little clearer. This
class mushes together command line arguments and ini file options into
    a dictionary used throughout the program.
Other than loading all this up, it also is capable of generating a
template .ini file organized by section with the defaults from docopt
    already populated. The write_options_ini function could definitely
be written more efficiently, but at this early stage it is helpful
to see things listed the long way. Potentially something could tap into
the docopt library and generate the ini file without even explicitly
typing all of this out.
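    A minimal usage sketch (illustrative only; note that every settings value
    stays a string, exactly as docopt and configparser deliver it):
        options = Options()
        rate = int(options.settings['samplerate'])  # e.g. 44100
        options.write_options_ini()  # writes the ini file, backing up any old copy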
"""
def __init__(self):
self.settings = docopt(__doc__, version='Audio Processor 0.1')
        # Take a snapshot of the items so keys can be renamed while looping.
        for key, value in list(self.settings.items()):
new_key = key[2:]
self.settings[new_key] = self.settings.pop(key)
config = configparser.ConfigParser(allow_no_value=True)
filename = self.settings['inifile']
if os.path.isfile(filename) and config.read(filename)[0] == filename:
for section in config.sections():
for key, value in config.items(section):
if value is None:
value = True
# This is messy right now. INI files will override default
# or arguments. Should be docopt defaults, which INI can
# replace, which arguments can replace.
self.settings[key] = value
def write_options_ini(self):
config = configparser.ConfigParser(allow_no_value=True)
config.add_section('soundcard')
config.set('soundcard', 'inputdevice', self.settings['inputdevice'])
config.set('soundcard', 'channels', self.settings['channels'])
config.set('soundcard', 'samplerate', self.settings['samplerate'])
config.set('soundcard', 'framesize', self.settings['framesize'])
config.add_section('stdout')
config.set('stdout', 'stdout', self.settings['stdout'])
config.set('stdout', 'stdoutformat', self.settings['stdoutformat'])
config.add_section('midi')
config.set('midi', 'midiout', self.settings['midiout'])
config.set('midi', 'outport', self.settings['outport'])
config.set('midi', 'outchannel', self.settings['outchannel'])
config.set('midi', 'sysexmanf', self.settings['sysexmanf'])
config.add_section('tempo')
config.set('tempo', 'gettempo', self.settings['gettempo'])
config.set('tempo', 'talg', self.settings['talg'])
config.set('tempo', 'tframemult', self.settings['tframemult'])
config.set('tempo', 'thopmult', self.settings['thopmult'])
config.set('tempo', 'taverage', self.settings['taverage'])
config.set('tempo', 'tcount', self.settings['tcount'])
config.set('tempo', 'tcontrolnum', self.settings['tcontrolnum'])
config.set('tempo', 'tcontroltype', self.settings['tcontroltype'])
config.set('tempo', 'tsysexnum', self.settings['tsysexnum'])
config.set('tempo', 'tsysextype', self.settings['tsysextype'])
config.add_section('beats')
config.set('beats', 'getbeats', self.settings['getbeats'])
config.set('beats', 'balg', self.settings['balg'])
config.set('beats', 'bframemult', self.settings['bframemult'])
config.set('beats', 'bhopmult', self.settings['bhopmult'])
config.set('beats', 'bcontrolnum', self.settings['bcontrolnum'])
config.set('beats', 'bsysexnum', self.settings['bsysexnum'])
config.set('beats', 'bvaltype', self.settings['bvaltype'])
config.set('beats', 'bclock', self.settings['bclock'])
config.add_section('rms')
config.set('rms', 'getrms', self.settings['getrms'])
config.set('rms', 'rframemult', self.settings['rframemult'])
config.set('rms', 'rhopmult', self.settings['rhopmult'])
config.set('rms', 'rcontrolnum', self.settings['rcontrolnum'])
config.set('rms', 'rsysexnum', self.settings['rsysexnum'])
config.set('rms', 'rgraceful', self.settings['rgraceful'])
config.add_section('frequencies')
config.set('frequencies', 'getfrequencies',
self.settings['getfrequencies'])
config.set('frequencies', 'falg', self.settings['falg'])
config.set('frequencies', 'fframemult', self.settings['fframemult'])
config.set('frequencies', 'fhopmult', self.settings['fhopmult'])
config.set('frequencies', 'fcount', self.settings['fcount'])
config.set('frequencies', 'fbuckets', self.settings['fbuckets'])
config.set('frequencies', 'fsysexnum', self.settings['fsysexnum'])
config.set('frequencies', 'fgraceful', self.settings['fgraceful'])
config.add_section('pitch')
config.set('pitch', 'getpitch', self.settings['getpitch'])
config.set('pitch', 'palg', self.settings['palg'])
config.set('pitch', 'pframemult', self.settings['pframemult'])
config.set('pitch', 'phopmult', self.settings['phopmult'])
config.set('pitch', 'ptolerance', self.settings['ptolerance'])
config.set('pitch', 'pcount', self.settings['pcount'])
config.set('pitch', 'plowcutoff', self.settings['plowcutoff'])
config.set('pitch', 'phighcutoff', self.settings['phighcutoff'])
config.set('pitch', 'pfoldoctaves', self.settings['pfoldoctaves'])
config.set('pitch', 'pnumoffset', self.settings['pnumoffset'])
config.set('pitch', 'pnoteon', self.settings['pnoteon'])
config.set('pitch', 'pnoteoff', self.settings['pnoteoff'])
config.set('pitch', 'pcontrolnum', self.settings['pcontrolnum'])
config.set('pitch', 'psysexnum', self.settings['psysexnum'])
if os.path.exists(self.settings['inifile']):
os.rename(self.settings['inifile'],
self.settings['inifile'] + "." + dt.now().strftime("%s"))
        with open(self.settings['inifile'], 'w') as configfile:
config.write(configfile)
class TempoFinder:
"""Tempo finder object that receives frames and sends MIDI messages.
Sticky object that initializes with the Aubio tempo object, as adjusted
by the many configuration options that are available. Sets up a holder
for incoming frames of audio data. Once there are enough frames to work
with, the data is combined and processed by the tempo object. Results
are cleaned up, and MIDI messages as configured are sent out.
There appear to be many ways of trying to send this information via MIDI.
The issue is that MIDI data bytes are 0-127. Two options are built in.
One, just subtract 60 from the rounded BPM value and use that. On the
receiving end, just add 60 back to the value and there you go. Another
option is to spread the number out across two 7 bit bytes (that sounds
wrong), which is the default for sysex messages. The BPM is multiplied
by 10, rounded, then bit shifted across two bytes. On the receiving end,
    reassemble the value like this:
    ((first_data_byte * 128) + second_data_byte) / 10.0
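    For example (illustrative numbers): 128.1 BPM becomes 1281, which is sent as
    the two data bytes [0x0A, 0x01] because 1281 >> 7 == 10 and 1281 & 0x7F == 1;
    the receiver rebuilds ((10 * 128) + 1) / 10.0 == 128.1.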
"""
def __init__(self, options):
self.tempo_object = tempo(options.settings['talg'],
int(float(options.settings['framesize']) *
float(options.settings['tframemult'])),
int(float(options.settings['framesize']) *
float(options.settings['tframemult']) *
float(options.settings['thopmult'])),
int(float(options.settings['samplerate'])))
self.frame_arrays = np.zeros(
(int(float(options.settings['tframemult'])),
int(float(options.settings['framesize']))),
dtype=np.float32)
self.midi_processor = None
self.sysex_command_array = []
for command in options.settings['tsysexnum'].split(' '):
self.sysex_command_array.append(int(command, 0))
self.bpm_sysex_rule = self.bpm_to_two_bytes
if options.settings['tsysextype'] == 'twobytes':
self.bpm_sysex_rule = self.bpm_to_two_bytes
elif options.settings['tsysextype'] == 'minus60':
self.bpm_sysex_rule = self.bpm_minus_sixty
self.control_number = False
if options.settings['tcontrolnum'] != 'None':
self.control_number = int(options.settings['tcontrolnum'], 0)
self.bpm_control_rule = self.bpm_minus_sixty
if options.settings['tcontroltype'] == 'minus60':
self.bpm_control_rule = self.bpm_minus_sixty
self.frame_count = 0
self.BPMs = []
self.average_BPMs = []
self.last_BPM = 0.0
self.average = int(options.settings['taverage'])
self.count = int(options.settings['tcount'])
self.frame_multiplier = int(options.settings['tframemult'])
def add_frame(self, frame_array):
self.frame_arrays[self.frame_count] = frame_array
self.frame_count += 1
if self.frame_count == self.frame_multiplier:
combined_array = np.ravel(self.frame_arrays)
self.tempo_object(combined_array)
bpm = self.tempo_object.get_bpm()
if bpm < 60.0:
bpm *= 2.0
if bpm < 60.0:
bpm = 60.0
if bpm > 187.0:
bpm /= 2.0
if bpm > 187.0:
bpm = 187.0
self.BPMs.append(bpm)
if len(self.BPMs) > self.average:
del self.BPMs[0]
self.average_BPMs.append(round(sum(self.BPMs) /
len(self.BPMs), 1))
if len(self.average_BPMs) > self.count:
del self.average_BPMs[0]
most_bpm, foo = Counter(self.average_BPMs).most_common(1)[0]
if most_bpm != self.last_BPM:
self.last_BPM = most_bpm
if self.control_number:
self.midi_processor.add_control_message(
self.control_number, self.bpm_control_rule(most_bpm)[0]
)
if self.sysex_command_array:
self.midi_processor.add_sysex_message(
self.sysex_command_array,
self.bpm_sysex_rule(most_bpm)
)
self.frame_count = 0
@staticmethod
def bpm_to_two_bytes(bpm):
bpm = int(bpm * 10)
bytesarray = [bpm >> 7, bpm & 0x7F]
return bytesarray
@staticmethod
def bpm_minus_sixty(bpm):
bpm = int(bpm - 60)
if bpm < 0:
bpm = 0
elif bpm > 127:
bpm = 127
return [bpm]
class BeatFinder:
"""Beat finder object that receives frames and sends MIDI messages.
Sticky object that initializes with the Aubio tempo object, as adjusted
by the many configuration options that are available. Sets up a holder
for incoming frames of audio data. Once there are enough frames to work
with, the data is combined and processed by the tempo object. Results
are cleaned up, and MIDI messages as configured are sent out.
TODO: Add a mechanism for sending 24 clock tick messages. Trivial to
just send 24 messages to the MidiProcessor right away after a beat,
but the clock ticks should probably be at spaced out evenly. The problem
comes in when the music speeds up and the next beat arrives before the 24
clock ticks have sent. It also really saturates the MidiProcessor, as well
as software that is listening to the messages. Leaving this out for now.
"""
def __init__(self, options):
self.beat_object = tempo(options.settings['balg'],
int(float(options.settings['framesize']) *
float(options.settings['bframemult'])),
int(float(options.settings['framesize']) *
float(options.settings['bframemult']) *
float(options.settings['bhopmult'])),
int(float(options.settings['samplerate'])))
self.frame_arrays = np.zeros(
(int(float(options.settings['bframemult'])),
int(float(options.settings['framesize']))),
dtype=np.float32)
self.midi_processor = None
self.sysex_command_array = []
for command in options.settings['bsysexnum'].split(' '):
self.sysex_command_array.append(int(command, 0))
self.control_number = False
if options.settings['bcontrolnum'] != 'None':
self.control_number = int(options.settings['bcontrolnum'], 0)
self.beat_sequence = []
for item in options.settings['bvaltype'].split(','):
self.beat_sequence.append(int(item.strip()))
if not self.beat_sequence:
self.beat_sequence = [64]
self.beat_sequence_position = 0
self.frame_count = 0
self.frame_multiplier = int(options.settings['bframemult'])
def add_frame(self, frame_array):
self.frame_arrays[self.frame_count] = frame_array
self.frame_count += 1
if self.frame_count == self.frame_multiplier:
combined_array = np.ravel(self.frame_arrays)
is_beat = self.beat_object(combined_array)
if is_beat:
value_data = [self.beat_sequence[self.beat_sequence_position]]
if self.control_number:
self.midi_processor.add_control_message(
self.control_number, self.beat_sequence_position
)
if self.sysex_command_array:
self.midi_processor.add_sysex_message(
self.sysex_command_array, value_data
)
self.beat_sequence_position += 1
if self.beat_sequence_position == len(self.beat_sequence):
self.beat_sequence_position = 0
self.frame_count = 0
class RMSFinder:
"""RMS finder object that receives frames and sends MIDI messages.
Sticky object that sets up a holder for incoming frames of audio data.
Once there are enough frames to work with, the data is combined and
processed by the filter object. Results are cleaned up, and MIDI messages
as configured are sent out.
This function does not rely on the Aubio library.
"""
def __init__(self, options):
self.midi_processor = None
self.sysex_rms_command_array = []
for command in options.settings['rsysexnum'].split(' '):
self.sysex_rms_command_array.append(int(command, 0))
self.rms_control_number = False
if options.settings['rcontrolnum'] != 'None':
self.rms_control_number = \
int(options.settings['rcontrolnum'], 0)
self.frame_arrays = np.zeros(
(int(float(options.settings['rframemult'])),
int(float(options.settings['framesize']))),
dtype=np.float32)
self.frame_count = 0
self.max_rms = 0
self.last_scaled_rms = 0
self.frame_multiplier = int(options.settings['rframemult'])
self.graceful = float(options.settings['rgraceful'])
def add_frame(self, frame_array):
self.frame_arrays[self.frame_count] = frame_array
self.frame_count += 1
if self.frame_count == self.frame_multiplier:
self.frame_count = 0
combined_array = np.ravel(self.frame_arrays)
rms = self.qmean(combined_array)
if rms > self.max_rms:
self.max_rms = rms
if self.max_rms > 0:
scaled_rms = int(127 * (rms / self.max_rms))
if scaled_rms != self.last_scaled_rms:
graceful_rms = int(self.last_scaled_rms * self.graceful)
if scaled_rms < graceful_rms:
scaled_rms = graceful_rms
if self.rms_control_number:
self.midi_processor.add_control_message(
self.rms_control_number, scaled_rms)
if self.sysex_rms_command_array:
self.midi_processor.add_sysex_message(
self.sysex_rms_command_array, [scaled_rms])
self.last_scaled_rms = scaled_rms
@staticmethod
def qmean(num):
return math.sqrt(sum(n * n for n in num) / len(num))
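    # Illustrative example (not part of the original source):
    # qmean([3.0, 4.0]) == math.sqrt((9.0 + 16.0) / 2) ~= 3.54 (root mean square).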
class FrequenciesFinder:
"""Frequency finder object that receives frames and sends MIDI messages.
Sticky object that initializes with the Aubio filter object, as adjusted
by the many configuration options that are available. Sets up a holder
for incoming frames of audio data. Once there are enough frames to work
with, the data is combined and processed by the filter object. Results
are cleaned up, and MIDI messages as configured are sent out.
Note that this is definitely the most challenging processing work, and
there is potential memory leak issue as described below.
"""
def __init__(self, options):
if options.settings['fbuckets'] == 'third-octave':
options.settings['fbuckets'] = [22.4,
25, 31.5, 40, 50, 63,
80, 100, 125, 160, 200,
250, 315, 400, 500, 630,
800, 1000, 1250, 1600, 2000,
2500, 3150, 4000, 5000, 6300,
8000, 10000, 12500, 16000, 20000,
22390]
elif options.settings['fbuckets'] == 'octave':
options.settings['fbuckets'] = [22,
31.5, 63, 125, 250, 500,
1000, 2000, 4000, 8000, 16000,
22720]
self.midi_processor = None
self.sysex_command_array = []
for command in options.settings['fsysexnum'].split(' '):
self.sysex_command_array.append(int(command, 0))
self.filter_bank = filterbank(len(options.settings['fbuckets']) - 2,
(int(options.settings['framesize']) *
int(options.settings['fframemult'])))
self.frequencies = fvec(options.settings['fbuckets'])
self.filter_bank.set_triangle_bands(self.frequencies,
int(options.settings[
'samplerate']))
self.phase_vocoder = pvoc(int(float(options.settings['framesize']) *
float(options.settings['fframemult'])),
int(float(options.settings['framesize']) *
float(options.settings['fframemult']) *
float(options.settings['fhopmult'])))
self.frame_arrays = np.zeros(
(int(float(options.settings['fframemult'])),
int(float(options.settings['framesize']))),
dtype=np.float32)
self.frame_count = 0
self.maximum_frequencies = np.zeros(
(len(options.settings['fbuckets']) - 2,), dtype=np.float32)
self.last_energies = np.zeros((len(options.settings['fbuckets']) - 2,),
dtype=np.float32)
self.count_energies = np.zeros((int(options.settings['fcount']),
(len(options.settings[
'fbuckets']) - 2)),
dtype=np.float32)
self.energy_count = 0
self.rest_stop = 0
self.frame_multiplier = int(options.settings['fframemult'])
self.count = int(options.settings['fcount'])
self.graceful = float(options.settings['fgraceful'])
def add_frame(self, frame_array):
self.frame_arrays[self.frame_count] = frame_array
self.frame_count += 1
if self.frame_count == self.frame_multiplier:
self.frame_count = 0
combined_array = np.ravel(self.frame_arrays)
# This is causing a memory leak on a OSX Brew installed version of
# Aubio, at least according to "top". Even creating and destroying
# the phase vocoder each time through the loop doesn't seem to
# solve the problem. I believe the intent is for the phase vocoder
# to hold previous runs to match up previous calls with data, but
# it appears to be a little too sticky.
fftgrain = self.phase_vocoder(combined_array)
self.count_energies[self.energy_count] = self.filter_bank(fftgrain)
self.energy_count += 1
if self.energy_count == self.count:
self.energy_count = 0
energies = np.amax(self.count_energies, axis=0)
self.maximum_frequencies = np.maximum(energies,
self.maximum_frequencies)
energies = np.divide(energies, self.maximum_frequencies)
energies = np.maximum(energies, self.last_energies)
self.last_energies = energies * self.graceful
energies *= 127.0
int_energies = energies.astype(int)
if self.sysex_command_array:
self.midi_processor.add_sysex_message(
self.sysex_command_array, int_energies)
class PitchFinder:
"""Pitch finder object that receives frames and sends MIDI messages.
Sticky object that initializes with the Aubio pitch object, as adjusted
by the many configuration options that are available. Sets up a holder
for incoming frames of audio data. Once there are enough frames to work
with, the data is combined and processed by the pitch object. Results
are cleaned up, and MIDI messages as configured are sent out.
You can send (and probably should) both note_on and note_off messages
here. Note_off messages are sent when an incoming note doesn't match
the previously sent one. The control and sysex message types on the other
hand only send when there is new note on information.
"""
def __init__(self, options):
self.algorithm = options.settings['palg']
self.frame_size = float(options.settings['framesize'])
self.frame_multiplier = int(options.settings['pframemult'])
self.hop_multiplier = float(options.settings['phopmult'])
self.samplerate = float(options.settings['samplerate'])
        self.tolerance = options.settings['ptolerance']
self.sysexnumber = options.settings['psysexnum']
self.sysex_command_array = []
if options.settings['psysexnum'] != 'None':
for command in options.settings['psysexnum'].split(' '):
self.sysex_command_array.append(int(command, 0))
self.control_number = False
if options.settings['pcontrolnum'] != 'None':
self.control_number = int(options.settings['pcontrolnum'], 0)
        self.send_note_ons = False
        self.send_note_offs = False
        if options.settings['pnoteon'] == 'True':
            self.send_note_ons = True
        if options.settings['pnoteoff'] == 'True':
            self.send_note_offs = True
self.count = int(options.settings['pcount'])
self.low_cutoff = int(options.settings['plowcutoff'])
self.high_cutoff = int(options.settings['phighcutoff'])
self.fold_octaves = False
if options.settings['pfoldoctaves'] == 'True':
self.fold_octaves = True
self.num_offset = int(options.settings['pnumoffset'])
self.midi_processor = None
self.pitch_object = pitch(self.algorithm,
int(self.frame_size *
self.frame_multiplier),
int(self.frame_size *
self.frame_multiplier *
self.hop_multiplier),
int(self.samplerate))
        if self.tolerance != 'None':
            self.pitch_object.set_tolerance(float(self.tolerance))
self.pitch_object.set_unit('midi')
self.frame_arrays = np.zeros(
(int(self.frame_multiplier),
int(self.frame_size)),
dtype=np.float32)
self.frame_count = 0
self.most_pitches = [-1]
self.pitch_count = 0
self.last_pitch = 0
def add_frame(self, frame_array):
self.frame_arrays[self.frame_count] = frame_array
self.frame_count += 1
if self.frame_count == self.frame_multiplier:
self.frame_count = 0
combined_array = np.ravel(self.frame_arrays)
pitches = self.pitch_object(combined_array)
for x in range(
int(round(self.pitch_object.get_confidence() * 10))):
self.most_pitches.append(self.midify_pitch(pitches))
self.pitch_count += 1
if self.pitch_count == self.count:
self.pitch_count = 0
most_pitch, foo = Counter(self.most_pitches).most_common(1)[0]
if most_pitch != self.last_pitch:
if most_pitch == -1:
if self.send_note_offs:
self.midi_processor.add_note_off_message(
self.last_pitch)
elif self.last_pitch == -1:
if self.send_note_ons:
self.midi_processor.add_note_on_message(most_pitch)
if self.control_number:
self.midi_processor.add_control_message(
self.control_number, most_pitch)
if self.sysex_command_array:
self.midi_processor.add_sysex_message(
self.sysex_command_array, [most_pitch]
)
else:
if self.send_note_offs:
self.midi_processor.add_note_off_message(
self.last_pitch)
                        if self.send_note_ons:
self.midi_processor.add_note_on_message(most_pitch)
if self.control_number:
self.midi_processor.add_control_message(
self.control_number, most_pitch)
if self.sysex_command_array:
self.midi_processor.add_sysex_message(
self.sysex_command_array, [most_pitch]
)
self.last_pitch = most_pitch
self.most_pitches = [-1]
def midify_pitch(self, _pitch):
_pitch = int(round(_pitch[0]))
if _pitch <= 0:
_pitch = -1
if _pitch > 127:
_pitch = -1
        if _pitch < self.low_cutoff or _pitch > self.high_cutoff:
_pitch = -1
if self.fold_octaves:
if 0 <= _pitch <= 120:
_pitch %= 12
_pitch += self.num_offset
else:
_pitch = -1
return _pitch
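    # Illustrative example (not part of the original source): with fold_octaves
    # True and num_offset 60, a detected pitch of 74.3 becomes round(74.3) == 74,
    # then 74 % 12 == 2 and 2 + 60 == 62; out-of-range values return -1 (no note).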
class MidiProcessor:
"""Wrapper class for receiving messages and sending out via mido library.
Sticky object that receives messages from the various audio processing
classes and MIDIfies them using the mido library. Deals with
the custom manufacturer sysex prefix bytes.
"""
def __init__(self, options):
self.midi_outport = None
self.sysex_prefix = []
for manf_byte in options.settings['sysexmanf'].split(' '):
self.sysex_prefix.append(int(manf_byte, 0))
for channel in options.settings['outchannel'].split(' '):
self.sysex_prefix.append(int(channel, 0) - 1)
self.channel = int(options.settings['outchannel']) - 1
self.stdout = False
if options.settings['stdout'] == 'True':
self.stdout = True
if options.settings['stdoutformat'] == 'bytes':
self.stdoutformat = 1
elif options.settings['stdoutformat'] == 'bin':
self.stdoutformat = 2
elif options.settings['stdoutformat'] == 'hex':
self.stdoutformat = 3
else:
self.stdoutformat = 0
def add_control_message(self, control, value):
self.send_message(mido.Message('control_change',
channel=self.channel,
control=control,
value=value))
def add_note_on_message(self, note):
self.send_message(mido.Message('note_on',
channel=self.channel,
note=note))
def add_note_off_message(self, note):
self.send_message(mido.Message('note_off',
channel=self.channel,
note=note))
def add_sysex_message(self, commands, datas):
sysexdata = self.sysex_prefix[:]
for command in commands:
sysexdata.append(command)
for data in datas:
sysexdata.append(data)
self.send_message(mido.Message('sysex', data=sysexdata))
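    # Illustrative example (not part of the original source): with sysexmanf
    # '0x00 0x01' and outchannel '1', sysex_prefix == [0, 1, 0], so calling
    # add_sysex_message([0x0B], [64]) sends a mido 'sysex' message whose data is
    # [0, 1, 0, 11, 64] (mido supplies the surrounding 0xF0/0xF7 framing bytes).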
def send_message(self, mido_message):
if self.midi_outport:
self.midi_outport.send(mido_message)
if self.stdout:
if self.stdoutformat == 0:
print(mido_message)
elif self.stdoutformat == 1:
sys.stdout.write(str(mido_message.bytes()))
elif self.stdoutformat == 2:
sys.stdout.write(mido_message.bin())
elif self.stdoutformat == 3:
sys.stdout.write(mido_message.hex() + ' ')
class ProcessAudio:
"""Primary loop. Take audio frames and deliver to audio processors.
Using sounddevice audio library, and as configured by options, take
incoming data from sound card and send a copy of the audio data to
the audio processors that are turned on. Responsible for initializing
the audio processors and midi processor.
"""
def __init__(self, options):
self.midi_processor = MidiProcessor(options)
if options.settings['midiout']:
if options.settings['outport'] == 'default':
available_ports = mido.get_output_names()
if available_ports:
options.settings['outport'] = available_ports[0]
else:
options.settings['outport'] = ""
if options.settings['outport']:
self.midi_processor.midi_outport = mido.open_output(
options.settings['outport'])
if options.settings['getbeats'] == 'True':
self.beat_finder = BeatFinder(options)
self.beat_finder.midi_processor = self.midi_processor
else:
self.beat_finder = None
if options.settings['gettempo'] == 'True':
self.tempo_finder = TempoFinder(options)
self.tempo_finder.midi_processor = self.midi_processor
else:
self.tempo_finder = None
if options.settings['getrms'] == 'True':
self.rms_finder = RMSFinder(options)
self.rms_finder.midi_processor = self.midi_processor
else:
self.rms_finder = None
if options.settings['getfrequencies'] == 'True':
self.frequencies_finder = FrequenciesFinder(options)
self.frequencies_finder.midi_processor = self.midi_processor
else:
self.frequencies_finder = None
if options.settings['getpitch'] == 'True':
self.pitch_finder = PitchFinder(options)
self.pitch_finder.midi_processor = self.midi_processor
else:
self.pitch_finder = None
if options.settings['inputdevice'] == 'default':
options.settings['inputdevice'] = sd.default.device['input']
self.input_device = options.settings['inputdevice']
self.channels = int(options.settings['channels'])
self.blocksize = int(options.settings['framesize'])
self.samplerate = int(options.settings['samplerate'])
def callback(self, data, ignore_frames, ignore_time, ignore_status):
if any(data):
if self.beat_finder:
self.beat_finder.add_frame(data[:, 0])
if self.tempo_finder:
self.tempo_finder.add_frame(data[:, 0])
if self.rms_finder:
self.rms_finder.add_frame(data[:, 0])
if self.frequencies_finder:
self.frequencies_finder.add_frame(data[:, 0])
if self.pitch_finder:
self.pitch_finder.add_frame(data[:, 0])
def start(self):
with sd.InputStream(device=self.input_device,
channels=self.channels,
callback=self.callback,
blocksize=self.blocksize,
samplerate=self.samplerate):
while True:
pass
if __name__ == '__main__':
# Startup loop. Populates options and starts up ProcessAudio.
#
# Loads the options class, and if it was one the special cases like
# writing the inifile or printing out the midi and sound device
# information, do that then quit.
#
# Otherwise, start the ProcessAudio class and get out of the way.
main_options = Options()
if main_options.settings['writeinifile']:
main_options.write_options_ini()
quit()
elif main_options.settings['listsounddevices'] or main_options.settings[
'listmidiports']:
if main_options.settings['listsounddevices']:
print("\nAvailable sound devices:")
print(sd.query_devices())
if main_options.settings['listmidiports']:
print("\nAvailable MIDI ports:")
print("\n".join(mido.get_output_names()))
print("")
quit()
print("Control-C to quit")
process_audio = ProcessAudio(main_options)
process_audio.start()
while True:
time.sleep(.1)
| mit | -3,844,153,051,610,324,500 | 48.787692 | 79 | 0.549018 | false |
DramaFever/calcifer | calcifer/tree.py | 1 | 9183 | """
`calcifer.tree` module
This module implements a non-deterministic nested dictionary (tree).
The tree comprises leaf nodes, dict nodes, and "unknown nodes" -- nodes
which are known to exist but undefined beyond that.
Ultimately, the policy tree contains *definitions*, a higher-level abstraction
on "value": LeafPolicyNode uses the property `definition`, which may compare
to specific values or generate a template for procuring the value.
"""
from abc import ABCMeta, abstractmethod
import logging
from calcifer.definitions import Value
logger = logging.getLogger(__name__)
class PolicyNode:
"""
Abstract class for node tree.
"""
__metaclass__ = ABCMeta
@abstractmethod
def get_template(self):
"""
Generate the template for the node (recursively)
"""
pass
@abstractmethod
def select(self, path=None):
"""
Traverse the tree and retrieve a specific node with a given path.
`select` retrieves existing nodes or populates default nodes based
on path values.
Returns a tuple of (selected_node, new_root)
"""
if not path:
return (self, self)
@abstractmethod
def match(self, value):
"""
`match` compares a node with a given value, possibly returning an
altered node in the process. For unknown nodes, this means populating
the node with a leaf node defined as having that value.
For nodes with a more complex definition, the behavior of `match`
defers to the definition of the node.
"""
return False, self
@abstractmethod
def choose(self, step):
"""
Moves down the given step and returns:
(the chosen node, the new version of itself (list or dict), and a dict of the steps not taken)
"""
return (None, None, {})
@abstractmethod
def reconstruct(self, possible_steps):
"""
This method takes in a dictionary of possible steps that could be taken and returns a node object
"""
raise NotImplementedError
@staticmethod
def from_obj(obj):
"""
To facilitate converting nested dict data structures, the static
method `from_obj` recursively constructs a PolicyNode tree from
an object
"""
if isinstance(obj, PolicyNode):
return obj
if isinstance(obj, dict):
return DictPolicyNode(**obj)
if isinstance(obj, list):
return ListPolicyNode(*obj)
return LeafPolicyNode(Value(obj))
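    # Illustrative example (not part of the original source): from_obj recurses
    # through plain containers, so
    #   PolicyNode.from_obj({"user": {"id": 7}, "tags": ["a", "b"]})
    # builds a DictPolicyNode whose "user" child is another DictPolicyNode and
    # whose "tags" child is a ListPolicyNode of LeafPolicyNode(Value(...)) leaves.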
class UnknownPolicyNode(PolicyNode):
def __init__(self):
pass
@property
def value(self):
return None
def reconstruct(self, possible_steps):
raise TypeError
def get_template(self):
return {}
def choose(self, step):
if isinstance(step, int):
new_self = ListPolicyNode()
steps_not_taken = {k: UnknownPolicyNode() for k in range(step)}
else:
new_self = DictPolicyNode()
steps_not_taken = {}
return (UnknownPolicyNode(), new_self, steps_not_taken)
def select(self, path=None):
if not path:
return (self, self)
# recurse
first = path[0]
rest = path[1:]
value, subpolicy = UnknownPolicyNode().select(rest)
return value, DictPolicyNode(**{first: subpolicy})
def match(self, value):
return True, LeafPolicyNode(Value(value))
def __repr__(self):
return "UnknownPolicyNode()"
def __eq__(self, other):
return isinstance(other, UnknownPolicyNode)
class LeafPolicyNode(PolicyNode):
def __init__(self, definition=None):
self._definition = definition
@property
def definition(self):
return self._definition
@property
def value(self):
return self._definition.value
def reconstruct(self, possible_steps):
if possible_steps:
raise TypeError
return self.__class__(self._definition)
def get_template(self):
return self.definition.get_template()
def choose(self, step):
raise TypeError("You're at the end dummy!")
def select(self, path=None):
if path:
logger.debug((
"Attempting to select sub-path %r of %r"
), path, self)
raise Exception(
"Node cannot be traversed, attempted sub-path: {}".format(path)
)
return (self, self)
def match(self, value):
matches, new_definition = self.definition.match(value)
return matches, LeafPolicyNode(new_definition)
def __repr__(self):
return (
"LeafPolicyNode("
"definition={definition}"
")"
).format(definition=self.definition)
def __eq__(self, other):
return (
isinstance(other, LeafPolicyNode) and
other.definition == self.definition
)
class DictPolicyNode(PolicyNode):
def __init__(self, **nodes):
self._nodes = {
k: PolicyNode.from_obj(v)
for k, v in nodes.items()
}
@property
def nodes(self):
return self._nodes
@property
def keys(self):
return self._nodes.keys()
@property
def value(self):
return {
name: node.value
for name, node in self.nodes.items()
}
def reconstruct(self, possible_steps):
return DictPolicyNode(**possible_steps)
def choose(self, step):
chosen_node = self._nodes.get(step, UnknownPolicyNode())
new_self = self
steps_not_taken = {k: v for k, v in self._nodes.items() if k != step}
return chosen_node, new_self, steps_not_taken
def get_template(self):
return {
k: v.get_template() for k, v in self.nodes.items()
}
def select(self, path=None):
if not path:
return (self, self)
first = path[0]
rest = path[1:]
node, new_first = self[first].select(rest)
new_nodes = {k: v for k, v in self.nodes.items()}
new_nodes[first] = new_first
return node, DictPolicyNode(**new_nodes)
def match(self, value):
return False, self
def __setitem__(self, key, node):
self._nodes[key] = node
def __getitem__(self, key):
if key not in self._nodes:
return UnknownPolicyNode()
return self._nodes[key]
def __repr__(self):
args = ['{}={}'.format(k, v) for k, v in self.nodes.items()]
return "DictPolicyNode({})".format(", ".join(args))
def __eq__(self, other):
return (
isinstance(other, DictPolicyNode) and
other.nodes == self.nodes
)
class ListPolicyNode(PolicyNode):
def __init__(self, *nodes):
self._nodes = [
PolicyNode.from_obj(v)
for v in nodes
]
@property
def nodes(self):
return self._nodes
@property
def keys(self):
return [key for key in range(len(self._nodes))]
@property
def value(self):
return [
node.value
for node in self.nodes
]
def reconstruct(self, possible_steps):
if not possible_steps:
return ListPolicyNode()
highest_key = sorted(possible_steps.keys(), reverse=True)[0]
return ListPolicyNode(*[
possible_steps.get(i, UnknownPolicyNode())
for i in range(highest_key + 1)
])
def choose(self, step):
if len(self._nodes) > step:
# We have the step for sure
chosen_node = self._nodes[step]
else:
# step does not exist yet, must populate list with UnknownPolicyNodes
chosen_node = UnknownPolicyNode()
new_self = self
steps_not_taken = {i: self._nodes[i] for i in range(len(self._nodes)) if i != step}
return chosen_node, new_self, steps_not_taken
def get_template(self):
return [
v.get_template() for v in self.nodes
]
def select(self, path=None):
if not path:
return (self, self)
first = int(path[0])
rest = path[1:]
node, new_first = self[first].select(rest)
new_nodes = [v for v in self.nodes]
new_nodes[first] = new_first
return node, ListPolicyNode(*new_nodes)
def match(self, value):
return False, self
def __setitem__(self, key, node):
key = int(key)
sparsity = key - len(self._nodes) + 1
self._nodes.extend([UnknownPolicyNode()] * sparsity)
self._nodes[key] = node
def __getitem__(self, key):
try:
key = int(key)
return self._nodes[int(key)]
except:
return UnknownPolicyNode()
def __repr__(self):
args = ['{}'.format(v) for v in self.nodes]
return "ListPolicyNode({})".format(", ".join(args))
def __eq__(self, other):
return (
isinstance(other, ListPolicyNode) and
other.nodes == self.nodes
)
| mit | 1,653,217,216,247,063,300 | 25.850877 | 105 | 0.5765 | false |
amol9/fbstats | fbstats/main.py | 1 | 2731 | import sys
import os
from os.path import exists
# NOTE: Scheduler is assumed to also be exported by mutils.system.scheduler;
# its frequency_help attribute is referenced below when building the help text.
from mutils.system.scheduler import get_scheduler, Scheduler, PlatformError, FrequencyError
from redcmd import subcmd, CommandLine, CommandLineError, CommandError
from . import globals
from .fb import FB
from .action import Action
@subcmd
def job():
'''Run fbstats as a job.'''
fb = FB()
fb.add_job_perid()
fb.get_friends()
fb.update_stream()
fb.get_stream_job()
fb.clean_duplicates()
fb.get_likes()
fb.get_comments()
@subcmd
def schedule():
'Commands to schedule fb stats collection.'
pass
@subcmd(parent='schedule')
def add(frequency):
'''Add schedule.
	frequency: time frequency for collecting stats'''
scheduler = get_scheduler()
try:
scheduler.schedule(frequency, 'fbstats job', globals.scheduler_taskname)
print('schedule created..')
except (PlatformError, FrequencyError) as e:
print(e)
raise CommandError()
add.__extrahelp__ = Scheduler.frequency_help + os.linesep
add.__extrahelp__ += 'If schedule already exists, it\'ll be overwritten'
@subcmd(parent='schedule')
def remove():
'Remove schedule.'
try:
scheduler = get_scheduler()
scheduler.remove()
print('schedule removed..')
except (PlatformError, FrequencyError) as e:
print(e)
raise CommandError()
@subcmd
def plot():
'Commands to plot various charts.'
pass
@subcmd(parent='plot')
def likes(count=10):
'''Plot top users by likes count.
count: number of users to plot'''
pass
@subcmd(parent='plot')
def posts(count=10):
'''Plot top users by posts count.
count: number of users to plot'''
pass
@subcmd(parent='plot')
def timeline(first_name, last_name):
'''Plot a user's timeline in terms of posts count.
first_name: first name of the user
last_name: last name of the user'''
pass
@subcmd(parent='plot')
def graph(start_date, end_date):
'''Plot a graph of users connected by count of their likes and comments.
start_date: start date of posts
end_date: end date of posts
Date must be of the form: ddmmmyyyy, e.g. 26jan2015.'''
fb = FB()
fb.render_graph(start=start_date, end=end_date)
@subcmd
def setapp():
'Set app id and app secret.'
db_path = joinpath(globals.data_dir, globals.db_name)
db = DBManager(db_path)
db.connect()
fba = FBAccess(db)
fba.prompt_app_details()
db.disconnect()
def check_data_dir():
if not exists(globals.data_dir):
os.mkdir(globals.data_dir)
def main():
check_data_dir()
action.register("plot\s+(?P<type>user_posts)\s+(?P<first_name>\w+)\s+(?P<last_name>\w+)", fb.render_plot)
action.register("plot\s+(?P<type>\w+)(?:\s+(?P<count>\d+))?", fb.render_plot, {'type': str, 'count': int})
commandline = CommandLine()
try:
commandline.execute()
except CommandLineError as e:
print(e)
| mit | 7,284,842,101,302,858,000 | 18.789855 | 107 | 0.70011 | false |
awong1900/platformio | platformio/maintenance.py | 1 | 7117 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
import re
import struct
from os import remove
from os.path import isdir, isfile, join
from shutil import rmtree
from time import time
import click
from platformio import __version__, app, telemetry
from platformio.commands.install import cli as cmd_install
from platformio.commands.lib import lib_update as cmd_libraries_update
from platformio.commands.update import cli as cli_update
from platformio.commands.upgrade import get_latest_version
from platformio.exception import GetLatestVersionError, UpgraderFailed
from platformio.libmanager import LibraryManager
from platformio.platforms.base import PlatformFactory
from platformio.util import get_home_dir, get_lib_dir
def on_platformio_start(ctx):
telemetry.on_command(ctx)
after_upgrade(ctx)
check_platformio_upgrade()
check_internal_updates(ctx, "platforms")
check_internal_updates(ctx, "libraries")
def on_platformio_end(ctx, result): # pylint: disable=W0613
pass
def on_platformio_exception(e):
telemetry.on_exception(e)
class Upgrader(object):
def __init__(self, from_version, to_version):
self.from_version = self.version_to_int(from_version)
self.to_version = self.version_to_int(to_version)
self._upgraders = (
(self.version_to_int("0.9.0"), self._upgrade_to_0_9_0),
(self.version_to_int("1.0.0"), self._upgrade_to_1_0_0)
)
@staticmethod
def version_to_int(version):
match = re.match(r"(\d+)\.(\d+)\.(\d+)(\D+)?", version)
        assert match is not None and len(match.groups()) == 4
verchrs = [chr(int(match.group(i))) for i in range(1, 4)]
verchrs.append(chr(255 if match.group(4) is None else 0))
return struct.unpack(">I", "".join(verchrs))
def run(self, ctx):
if self.from_version > self.to_version:
return True
result = [True]
for item in self._upgraders:
if self.from_version >= item[0]:
continue
result.append(item[1](ctx))
return all(result)
def _upgrade_to_0_9_0(self, ctx): # pylint: disable=R0201
prev_platforms = []
# remove platform's folder (obsoleted package structure)
for name in PlatformFactory.get_platforms().keys():
pdir = join(get_home_dir(), name)
if not isdir(pdir):
continue
prev_platforms.append(name)
rmtree(pdir)
# remove unused files
for fname in (".pioupgrade", "installed.json"):
if isfile(join(get_home_dir(), fname)):
remove(join(get_home_dir(), fname))
if prev_platforms:
ctx.invoke(cmd_install, platforms=prev_platforms)
return True
def _upgrade_to_1_0_0(self, ctx): # pylint: disable=R0201
installed_platforms = PlatformFactory.get_platforms(
installed=True).keys()
if installed_platforms:
ctx.invoke(cmd_install, platforms=installed_platforms)
ctx.invoke(cli_update)
return True
def after_upgrade(ctx):
last_version = app.get_state_item("last_version", "0.0.0")
if last_version == __version__:
return
# promotion
click.echo("\nIf you like %s, please:" % (
click.style("PlatformIO", fg="cyan")
))
click.echo(
"- %s us on Twitter to stay up-to-date "
"on the latest project news > %s" %
(click.style("follow", fg="cyan"),
click.style("https://twitter.com/PlatformIO_Org", fg="cyan"))
)
click.echo("- %s us a star on GitHub > %s" % (
click.style("give", fg="cyan"),
click.style("https://github.com/ivankravets/platformio", fg="cyan")
))
click.secho("Thanks a lot!\n", fg="green")
if last_version == "0.0.0":
app.set_state_item("last_version", __version__)
return
click.secho("Please wait while upgrading PlatformIO ...",
fg="yellow")
u = Upgrader(last_version, __version__)
if u.run(ctx):
app.set_state_item("last_version", __version__)
click.secho("PlatformIO has been successfully upgraded to %s!\n" %
__version__, fg="green")
telemetry.on_event(category="Auto", action="Upgrade",
label="%s > %s" % (last_version, __version__))
else:
raise UpgraderFailed()
click.echo("")
def check_platformio_upgrade():
last_check = app.get_state_item("last_check", {})
interval = int(app.get_setting("check_platformio_interval")) * 3600 * 24
if (time() - interval) < last_check.get("platformio_upgrade", 0):
return
last_check['platformio_upgrade'] = int(time())
app.set_state_item("last_check", last_check)
try:
latest_version = get_latest_version()
except GetLatestVersionError:
click.secho("Failed to check for PlatformIO upgrades", fg="red")
return
if (latest_version == __version__ or
Upgrader.version_to_int(latest_version) <
Upgrader.version_to_int(__version__)):
return
click.secho("There is a new version %s of PlatformIO available.\n"
"Please upgrade it via " % latest_version,
fg="yellow", nl=False)
click.secho("platformio upgrade", fg="cyan", nl=False)
click.secho(" command.\nChanges: ", fg="yellow", nl=False)
click.secho("http://docs.platformio.org/en/latest/history.html\n",
fg="cyan")
def check_internal_updates(ctx, what):
last_check = app.get_state_item("last_check", {})
interval = int(app.get_setting("check_%s_interval" % what)) * 3600 * 24
if (time() - interval) < last_check.get(what + "_update", 0):
return
last_check[what + '_update'] = int(time())
app.set_state_item("last_check", last_check)
outdated_items = []
if what == "platforms":
for platform in PlatformFactory.get_platforms(installed=True).keys():
p = PlatformFactory.newPlatform(platform)
if p.is_outdated():
outdated_items.append(platform)
elif what == "libraries":
lm = LibraryManager(get_lib_dir())
outdated_items = lm.get_outdated()
if not outdated_items:
return
click.secho("There are the new updates for %s (%s)" %
(what, ", ".join(outdated_items)), fg="yellow")
if not app.get_setting("auto_update_" + what):
click.secho("Please update them via ", fg="yellow", nl=False)
click.secho("`platformio %supdate`" %
("lib " if what == "libraries" else ""),
fg="cyan", nl=False)
click.secho(" command.\n", fg="yellow")
else:
click.secho("Please wait while updating %s ..." % what, fg="yellow")
if what == "platforms":
ctx.invoke(cli_update)
elif what == "libraries":
ctx.invoke(cmd_libraries_update)
click.echo()
telemetry.on_event(category="Auto", action="Update",
label=what.title())
| mit | -8,118,982,638,193,287,000 | 32.729858 | 77 | 0.603344 | false |
RPi-Distro/pgzero | pgzero/rect.py | 1 | 15799 | # -*- coding: utf-8 -*-
import pygame.rect
class Rect(pygame.rect.Rect):
__slots__ = ()
# From Pygame docs
VALID_ATTRIBUTES = """
x y
top left bottom right
topleft bottomleft topright bottomright
midtop midleft midbottom midright
center centerx centery
size width height
w h
""".split()
def __setattr__(self, key, value):
try:
pygame.rect.Rect.__setattr__(self, key, value)
except AttributeError as e:
from .spellcheck import suggest
suggestions = suggest(key, self.VALID_ATTRIBUTES)
msg = e.args[0]
if suggestions:
msg += "; did you mean {!r}?".format(suggestions[0])
raise AttributeError(msg) from None
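    # Illustrative example (not part of the original source): a misspelled
    # attribute assignment such as `Rect(0, 0, 10, 10).centrex = 5` raises
    # AttributeError with the message extended by a hint like
    # "; did you mean 'centerx'?".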
Rect.__doc__ = pygame.rect.Rect.__doc__
class NoIntersect(Exception):
pass
class ZRect:
"""ZRect
This is a Python implementation of the pygame Rect class. Its raison
d'être is to allow the coordinates to be floating point. All pygame
functions which require a rect allow for an object with a "rect"
attribute and whose coordinates will be converted to integers implictly.
All functions which require a dict will use the flexible constructor
to convert from: this (or a subclass); a Pygame Rect; a 4-tuple or a
pair of 2-tuples. In addition, they'll recognise any object which has
an (optionally callable) .rect attribute whose value will be used instead.
"""
_item_mapping = dict(enumerate("xywh"))
def __init__(self, *args):
if len(args) == 1:
args = tuple(self._handle_one_arg(args[0]))
#
# At this point we have one of:
#
# x, y, w, h
# (x, y), (w, h)
# (x, y, w, h),
#
if len(args) == 4:
self.x, self.y, self.w, self.h = args
elif len(args) == 2:
(self.x, self.y), (self.w, self.h) = args
elif len(args) == 1:
self.x, self.y, self.w, self.h = args[0]
else:
raise TypeError("%s should be called with one, two or four arguments" % (cls.__name__))
self.rect = self
def _handle_one_arg(self, arg):
"""Handle -- possibly recursively -- the case of one parameter
Pygame -- and consequently pgzero -- is very accommodating when constructing
a rect. You can pass four integers, two pairs of 2-tuples, or one 4-tuple.
Also, you can pass an existing Rect-like object, or an object with a .rect
attribute. The object named by the .rect attribute is either one of the above,
or it's a callable object which returns one of the above.
This is evidently a recursive solution where an object with a .rect
attribute can yield an object with a .rect attribute, and so ad infinitum.
"""
#
# If the arg is an existing rect, return its elements
#
if isinstance(arg, RECT_CLASSES):
return arg.x, arg.y, arg.w, arg.h
#
# If it's something with a .rect attribute, start again with
# that attribute, calling it first if it's callable
#
if hasattr(arg, "rect"):
rectobj = arg.rect
if callable(rectobj):
rectobj = rectobj()
return self._handle_one_arg(rectobj)
#
# Otherwise, we assume it's an iterable of four elements
#
return arg
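    # Illustrative example (not part of the original source): for an object such
    # as `class Actor: rect = ZRect(10, 20, 30, 40)`, ZRect(Actor()) resolves the
    # .rect attribute recursively (calling it first if it is callable) and copies
    # its (x, y, w, h) values.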
def __repr__(self):
return "<%s (x: %s, y: %s, w: %s, h: %s)>" % (self.__class__.__name__, self.x, self.y, self.w, self.h)
def __reduce__(self):
return self.__class__, (self.x, self.y, self.w, self.h)
def copy(self):
return self.__class__(self.x, self.y, self.w, self.h)
__copy__ = copy
def __len__(self):
return 4
def __getitem__(self, item):
try:
return getattr(self, self._item_mapping[item])
except KeyError:
raise IndexError
def __setitem__(self, item, value):
try:
attribute = self._item_mapping[item]
except KeyError:
raise IndexError
else:
            setattr(self, attribute, value)
def __bool__(self):
return self.w != 0 and self.h != 0
def __iter__(self):
yield self.x
yield self.y
yield self.w
yield self.h
def __hash__(self):
raise TypeError("ZRect instances may not be used as dictionary keys")
def __eq__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) == (rect.x, rect.y, rect.w, rect.h)
def __ne__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) != (rect.x, rect.y, rect.w, rect.h)
def __lt__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) < (rect.x, rect.y, rect.w, rect.h)
def __gt__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) > (rect.x, rect.y, rect.w, rect.h)
def __le__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) <= (rect.x, rect.y, rect.w, rect.h)
def __ge__(self, *other):
rect = self.__class__(*other)
return (self.x, self.y, self.w, self.h) >= (rect.x, rect.y, rect.w, rect.h)
def __contains__(self, other):
"""Test whether a point (x, y) or another rectangle
(anything accepted by ZRect) is contained within this ZRect
"""
if len(other) == 2:
return self.collidepoint(*other)
else:
return self.contains(*other)
def _get_width(self):
return self.w
def _set_width(self, width):
self.w = width
width = property(_get_width, _set_width)
def _get_height(self):
return self.h
def _set_height(self, height):
self.h = height
height = property(_get_height, _set_height)
def _get_top(self):
return self.y
def _set_top(self, top):
self.y = top
top = property(_get_top, _set_top)
def _get_left(self):
return self.x
def _set_left(self, left):
self.x = left
left = property(_get_left, _set_left)
def _get_right(self):
return self.x + self.w
def _set_right(self, right):
self.x = right - self.w
right = property(_get_right, _set_right)
def _get_bottom(self):
return self.y + self.h
def _set_bottom(self, bottom):
self.y = bottom - self.h
bottom = property(_get_bottom, _set_bottom)
def _get_centerx(self):
return self.x + (self.w / 2)
def _set_centerx(self, centerx):
self.x = centerx - (self.w / 2)
centerx = property(_get_centerx, _set_centerx)
def _get_centery(self):
return self.y + (self.h / 2)
def _set_centery(self, centery):
self.y = centery - (self.h / 2)
centery = property(_get_centery, _set_centery)
def _get_topleft(self):
return self.x, self.y
def _set_topleft(self, topleft):
self.x, self.y = topleft
topleft = property(_get_topleft, _set_topleft)
def _get_topright(self):
return self.x + self.w, self.y
def _set_topright(self, topright):
x, y = topright
self.x = x - self.w
self.y = y
topright = property(_get_topright, _set_topright)
def _get_bottomleft(self):
return self.x, self.y + self.h
def _set_bottomleft(self, bottomleft):
x, y = bottomleft
self.x = x
self.y = y - self.h
bottomleft = property(_get_bottomleft, _set_bottomleft)
def _get_bottomright(self):
return self.x + self.w, self.y + self.h
def _set_bottomright(self, bottomright):
x, y = bottomright
self.x = x - self.w
self.y = y - self.h
bottomright = property(_get_bottomright, _set_bottomright)
def _get_midtop(self):
return self.x + self.w / 2, self.y
def _set_midtop(self, midtop):
x, y = midtop
self.x = x - self.w / 2
self.y = y
midtop = property(_get_midtop, _set_midtop)
def _get_midleft(self):
return self.x, self.y + self.h / 2
def _set_midleft(self, midleft):
x, y = midleft
self.x = x
self.y = y - self.h / 2
midleft = property(_get_midleft, _set_midleft)
def _get_midbottom(self):
return self.x + self.w / 2, self.y + self.h
def _set_midbottom(self, midbottom):
x, y = midbottom
self.x = x - self.w / 2
self.y = y - self.h
midbottom = property(_get_midbottom, _set_midbottom)
def _get_midright(self):
return self.x + self.w, self.y + self.h / 2
def _set_midright(self, midright):
x, y = midright
self.x = x - self.w
self.y = y - self.h / 2
midright = property(_get_midright, _set_midright)
def _get_center(self):
return self.x + self.w / 2, self.y + self.h / 2
def _set_center(self, center):
x, y = center
self.x = x - self.w / 2
self.y = y - self.h / 2
center = property(_get_center, _set_center)
def _get_size(self):
return self.w, self.h
def _set_size(self, size):
self.w, self.h = size
size = property(_get_size, _set_size)
def move(self, x, y):
return self.__class__(self.x + x, self.y + y, self.w, self.h)
def move_ip(self, x, y):
self.x += x
self.y += y
def _inflated(self, x, y):
return self.x - x / 2, self.y - y / 2, self.w + x, self.h + y
def inflate(self, x, y):
return self.__class__(*self._inflated(x, y))
def inflate_ip(self, x, y):
self.x, self.y, self.w, self.h = self._inflated(x, y)
def _clamped(self, *other):
rect = self.__class__(*other)
if self.w >= rect.w:
x = rect.x + rect.w / 2 - self.w / 2
elif self.x < rect.x:
x = rect.x
elif self.x + self.w > rect.x + rect.w:
x = rect.x + rect.w - self.w
else:
x = self.x
if self.h >= rect.h:
y = rect.y + rect.h / 2 - self.h / 2
elif self.y < rect.y:
y = rect.y
elif self.y + self.h > rect.y + rect.h:
y = rect.y + rect.h - self.h
else:
y = self.y
return x, y
def clamp(self, *other):
rect = self.__class__(*other)
x, y = self._clamped(rect)
return self.__class__(x, y, self.w, self.h)
def clamp_ip(self, *other):
rect = self.__class__(*other)
self.x, self.y = self._clamped(rect)
def _clipped(self, *other):
rect = self.__class__(*other)
if self.x >= rect.x and self.x < (rect.x + rect.w):
x = self.x
elif rect.x >= self.x and rect.x < (self.x + self.w):
x = rect.x
else:
raise NoIntersect
if (self.x + self.w) > rect.x and (self.x + self.w) <= (rect.x + rect.w):
w = self.x + self.w - x
elif (rect.x + rect.w) > self.x and (rect.x + rect.w) <= (self.x + self.w):
w = rect.x + rect.w - x
else:
raise NoIntersect
if self.y >= rect.y and self.y < (rect.y + rect.h):
y = self.y
elif rect.y >= self.y and rect.y < (self.y + self.h):
y = rect.y
else:
raise NoIntersect
if (self.y + self.h) > rect.y and (self.y + self.h) <= (rect.y + rect.h):
h = self.y + self.h - y
elif (rect.y + rect.h) > self.y and (rect.y + rect.h) <= (self.y + self.h):
h = rect.y + rect.h - y
else:
raise NoIntersect
return x, y, w, h
def clip(self, *other):
rect = self.__class__(*other)
try:
x, y, w, h = self._clipped(rect)
except NoIntersect:
x, y, w, h = self.x, self.y, 0, 0
return self.__class__(x, y, w, h)
def clip_ip(self, *other):
rect = self.__class__(*other)
try:
self.x, self.y, self.w, self.h = self._clipped(rect)
except NoIntersect:
self.x, self.y, self.w, self.h = self.x, self.y, 0, 0
def _unioned(self, *other):
rect = self.__class__(*other)
x = min(self.x, rect.x)
y = min(self.y, rect.y)
w = max(self.x + self.w, rect.x + rect.w) - x
h = max(self.y + self.h, rect.y + rect.h) - y
return x, y, w, h
def union(self, *other):
rect = self.__class__(*other)
return self.__class__(*self._unioned(rect))
def union_ip(self, *other):
rect = self.__class__(*other)
self.x, self.y, self.w, self.h = self._unioned(rect)
def _unionalled(self, others):
allrects = [self] + [self.__class__(other) for other in others]
x = min(r.x for r in allrects)
y = min(r.y for r in allrects)
w = max(r.x + r.w for r in allrects) - x
h = max(r.y + r.h for r in allrects) - y
return x, y, w, h
def unionall(self, others):
return self.__class__(*self._unionalled(others))
def unionall_ip(self, others):
self.x, self.y, self.w, self.h = self._unionalled(others)
def fit(self, *other):
rect = self.__class__(*other)
ratio = max(self.w / rect.w, self.h / rect.h)
w = self.w / ratio
h = self.h / ratio
x = rect.x + (rect.w - w) / 2
y = rect.y + (rect.h - h) / 2
return self.__class__(x, y, w, h)
def normalize(self):
if self.w < 0:
self.x += self.w
self.w = abs(self.w)
if self.h < 0:
self.y += self.h
self.h = abs(self.h)
def contains(self, *other):
rect = self.__class__(*other)
return (
self.x <= rect.x and
self.y <= rect.y and
self.x + self.w >= rect.x + rect.w and
self.y + self.h >= rect.y + rect.h and
self.x + self.w > rect.x and
self.y + self.h > rect.y
)
def collidepoint(self, *args):
if len(args) == 1:
x, y = args[0]
else:
x, y = args
return (
self.x <= x < (self.x + self.w) and
self.y <= y < (self.y + self.h)
)
def colliderect(self, *other):
rect = self.__class__(*other)
return (
self.x < rect.x + rect.w and
self.y < rect.y + rect.h and
self.x + self.w > rect.x and
self.y + self.h > rect.y
)
def collidelist(self, others):
for n, other in enumerate(others):
if self.colliderect(other):
return n
else:
return -1
def collidelistall(self, others):
return [n for n, other in enumerate(others) if self.colliderect(other)]
def collidedict(self, dict, use_values=True):
for k, v in dict.items():
if self.colliderect(v if use_values else k):
return k, v
def collidedictall(self, dict, use_values=True):
return [(k, v) for (k, v) in dict.items() if self.colliderect(v if use_values else k)]
RECT_CLASSES = (pygame.rect.Rect, ZRect)
| lgpl-3.0 | -2,143,938,976,755,739,000 | 29.722892 | 110 | 0.510318 | false |
red-hood/calendarserver | contrib/performance/report.py | 1 | 1237 | ##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from benchlib import select
import sys
import pickle
def main():
if len(sys.argv) < 5:
print('Usage: %s <datafile> <benchmark name> <parameter value> <metric> [command]' % (sys.argv[0],))
else:
stat, samples = select(pickle.load(file(sys.argv[1])), *sys.argv[2:5])
if len(sys.argv) == 5:
print('Samples')
print('\t' + '\n\t'.join(map(str, stat.squash(samples))))
print('Commands')
print('\t' + '\n\t'.join(stat.commands))
else:
print(getattr(stat, sys.argv[5])(samples, *sys.argv[6:]))
| apache-2.0 | -8,329,028,068,985,315,000 | 35.382353 | 108 | 0.656427 | false |
Netflix-Skunkworks/cloudaux | cloudaux/orchestration/aws/sqs.py | 1 | 2280 | from cloudaux.aws.sqs import get_queue_url, get_queue_attributes, list_queue_tags, list_dead_letter_source_queues
from cloudaux.decorators import modify_output
from flagpole import FlagRegistry, Flags
import logging
from cloudaux.orchestration.aws import ARN
logger = logging.getLogger('cloudaux')
registry = FlagRegistry()
FLAGS = Flags('BASE', 'TAGS', 'DEAD_LETTER_SOURCE_QUEUES')
@registry.register(flag=FLAGS.TAGS, key='tags')
def get_sqs_tags(sqs_queue, **conn):
return list_queue_tags(QueueUrl=sqs_queue["QueueUrl"], **conn)
@registry.register(flag=FLAGS.DEAD_LETTER_SOURCE_QUEUES, key='dead_letter_source_queues')
def get_dead_letter_queues(sqs_queue, **conn):
return list_dead_letter_source_queues(QueueUrl=sqs_queue["QueueUrl"], **conn)
@registry.register(flag=FLAGS.BASE)
def get_base(sqs_queue, **conn):
sqs_queue["Attributes"] = get_queue_attributes(QueueUrl=sqs_queue["QueueUrl"], AttributeNames=["All"], **conn)
# Get the Queue name:
name = ARN(sqs_queue["Attributes"]["QueueArn"]).parsed_name
return {
'arn': sqs_queue["Attributes"]["QueueArn"],
'url': sqs_queue["QueueUrl"],
'name': name,
'region': conn['region'],
'attributes': sqs_queue["Attributes"],
'_version': 1
}
@modify_output
def get_queue(queue, flags=FLAGS.ALL, **conn):
"""
Orchestrates all the calls required to fully fetch details about an SQS Queue:
{
"Arn": ...,
"Region": ...,
"Name": ...,
"Url": ...,
"Attributes": ...,
"Tags": ...,
"DeadLetterSourceQueues": ...,
"_version": 1
}
:param queue: Either the queue name OR the queue url
:param flags: By default, set to ALL fields.
:param conn: dict containing enough information to make a connection to the desired account. Must at least have
'assume_role' key.
:return: dict containing a fully built out SQS queue.
"""
# Check if this is a Queue URL or a queue name:
if queue.startswith("https://") or queue.startswith("http://"):
queue_name = queue
else:
queue_name = get_queue_url(QueueName=queue, **conn)
sqs_queue = {"QueueUrl": queue_name}
return registry.build_out(flags, sqs_queue, **conn)
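# Illustrative usage sketch (not part of the original source); the account and
# role values below are placeholders for whatever your cloudaux connection needs:
#   queue = get_queue('my-queue-name', flags=FLAGS.ALL,
#                     account_number='111111111111',
#                     assume_role='ReadOnly', region='us-east-1')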
| apache-2.0 | 8,771,817,787,487,272,000 | 30.666667 | 115 | 0.645614 | false |
MarkNenadov/YNABpy | YNABpy/BaseClasses.py | 1 | 3510 | """
BaseClasses.py
INTRODUCTION
YNABpy - A Python module for the YNAB (You Need A Budget) application.
AUTHOR
Mark J. Nenadov (2011)
* Essex, Ontario
* Email: <[email protected]>
LICENSING
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version
This program is distributed in the hope that it will be useful
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
try:
from YNABpy.Support import xmlize
except ImportError as err:
print("FATAL ERROR, critical YNAB3py file missing: " + str(err))
class YNAB3_AccountingWidget(object):
"""
Base class for various YNAB3 things
(ie. YNAB3_Payee, YNAB3_Transaction)
"""
dom = None
fields_of_interest = [xmlize('memo'), xmlize('inflow'), xmlize('outflow')]
def __init__(self, transaction_dom, additional_fields_of_interest):
"""Constructor
"""
self.dom = transaction_dom
for field in additional_fields_of_interest:
if field not in self.fields_of_interest:
self.fields_of_interest.append(field)
for child in transaction_dom.childNodes:
self.load_properties(child)
def get_property(self, name):
""" get a property (return None if it doesn't exist)
We do this because this class loads properties from the xml
dynamically, so there's a chance some properties may be missing
"""
if hasattr(self, name):
return getattr(self, name)
return None
def get_inflow(self):
""" get_inflow
"""
return self.get_property('inflow')
def get_outflow(self):
""" get_outflow
"""
return self.get_property('outflow')
def get_balance(self):
""" get_balance
Get the balance for this transaction, accounting
for both outflow and inflow
"""
if self.get_outflow() != None and self.get_inflow() != None:
return float(self.get_inflow()) - float(self.get_outflow())
else:
return None
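    # Illustrative example (not part of the original source): a transaction with
    # inflow "25.00" and outflow "10.00" reports get_balance() == 15.0.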
def get_memo(self):
""" get_memo
"""
return self.get_property('memo')
def toxml(self):
""" Get XML representation of this objects dom
"""
return self.dom.toxml()
class YNAB3_Lister(object):
"""
YNAB3_Lister base class
"""
contents = []
def __init__(self):
"""Constructor
"""
pass
def get_content(self):
""" return array of listed objects
"""
return self.contents
def add(self, t):
""" add an item
"""
self.contents.append(t)
def get_items_by_text_filter(self, field, filter_str):
""" Get items that have a argument-supplied property value that
matches a substring
"""
item_list = []
for item in self.get_content():
if item.get_property(field) != None:
if (item.get_property(field).find(filter_str) != -1):
item_list.append(item)
return item_list
| lgpl-3.0 | -2,300,809,593,029,452,800 | 22.092105 | 78 | 0.611111 | false |
zhuzhezhe/django_blog | mywebsite/settings.py | 1 | 3965 | #coding:utf-8
"""
Django settings for mywebsite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mx=_n)ji!d!+llfrhkwljbh9*0$l=4io@u0mchg4w#1w77xvk#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'suit', # django admin interface
    #'django_admin_bootstrapped', # a Bootstrap-styled admin
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#apps
'blog',
#'markdown_deux', #markdown support
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mywebsite.urls'
WSGI_APPLICATION = 'mywebsite.wsgi.application'
'''
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
# MySQL configuration
# production (SAE) database settings
MYSQL_HOST = 'w.rdc.sae.sina.com.cn'
MYSQL_PORT = '3307'
MYSQL_USER = '02z4loxk1y'
MYSQL_PASS = 'iky3xmxxz4jwk1j401lzmzlzmhmykyll05kxkwmx'
MYSQL_DB = 'app_zhuzhezhe'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': MYSQL_DB,
'USER': MYSQL_USER,
'PASSWORD': MYSQL_PASS,
'HOST': MYSQL_HOST,
'PORT': MYSQL_PORT,
}
}
'''
# sqlite3 configuration
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# MySQL configuration
'''
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mywebsite',
'HOST': '127.0.0.1',
'PORT': '3306',
'USER': 'root',
'PASSWORD': 'password',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
# static files settings
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
'/blog/static/',
)
#STATIC_ROOT = "/blog/static/"
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
#other
DAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer'
# admin backend (django-suit) configuration
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
'django.core.context_processors.request',
)
| mit | 2,951,843,428,582,509,600 | 24.227273 | 81 | 0.663063 | false |
lesh1k/beatport-verifier | v 1/BP_top100_v_1.1.0.py | 1 | 3528 | #This program is supposed to fetch the top 100 list from Beatport and compare it to the previously
#saved list, then present the difference, hence the new entries.
#Dependencies. It is written in Python 2.7 on Windows and it uses BeautifulSoup4
####Version log##################
##################################################
# v. 1.0.0 Released on 3 December 2012 at 23:31
#
#Basic functionality. First working release.
#
##################################################
#v. 1.1.0 Released 21 March 2013 01:37
#
#The program was not "cross-platform": it could not be run correctly from
#any directory without specifying the correct old tracklist path
#
#1) Added determining the path to old Tracklist
#
import urllib
import codecs
import os
import time
from bs4 import BeautifulSoup
oldList='old.txt'
newList='new.txt'
trackListFile='tracks.txt'
newEntries='NewTracks.txt'
folderName='Data'
curPath='/'.join(__file__.split('/')[:-1])
PATH=os.path.join(os.getcwd(), curPath, folderName)
VERSION='1.1.0'
def GetNewTrackList():
#Returns the list of new tracks
global PATH, folderName, trackListFile
fullPath=os.path.join(PATH,trackListFile)
if os.path.exists(fullPath):
oldData=ReadData(fullPath)
newData=DownloadTrackList()
return ComapreLists(oldData,newData)
else:
try:
os.mkdir(folderName)
print "The program is run for the first time.\n The directory and the initial list with 100 tracks will be created!"
except:
print 'The folder already exists!!'
newData=DownloadTrackList()
return ReadData(fullPath)
def DownloadTrackList():
#writes the data to the file returns the set of top 100 tracks from Beatport
URL="http://www.beatport.com/top-100"
html=urllib.urlopen(URL).read()
soup=BeautifulSoup(html)
data=''
#skip the first element because it's the name of the column in the table
trackList=soup.find_all('td',{'class':'secondColumn'})[1:]
for element in trackList:
data= data+codecs.encode(element.text,'utf-8')+'\n'
#Get rid of the last NewLine element
data=data[:-1]
WriteData(trackListFile,data)
data=data.split('\n')
return data
def ReadData(filePath):
#reads the list of tracks
toRead=open(filePath,'r')
data=toRead.read()
	if '\r\n' in data:
		data=data.replace('\r\n','\n')
	elif '\r' in data:
		data=data.replace('\r','\n')
data=data.split('\n')
toRead.close()
return data
def WriteData(fileName,data):
#Write the list of tracks to a file
global PATH
toWrite=open(os.path.join(PATH,fileName),'w')
toWrite.write(data)
toWrite.close()
def ComapreLists(oldL,newL):
#will return the list of new entries to the top-100. If any.
global newEntries
t=time.localtime()
locTime='Date: '+str(t[2])+'.'+str(t[1])+'.'+str(t[0])+'. Time: '+str(str(t[3])+'hrs '+str(t[4])+'mins '+str(t[5])+'s')
NewTracksList=[]
for track in newL:
if track not in oldL:
NewTracksList.append(track)
prettyResult=locTime+'\n\n\n'
if len(NewTracksList)==0:
NewTracksList.append("No New Entries Yet!!")
	#format the result before writing/printing
for element in NewTracksList:
prettyResult=prettyResult+element+'\n'
WriteData(newEntries, prettyResult)
return prettyResult
if __name__=="__main__":
print 'Hello! I am Beatport verifier version '+VERSION+'\nI am already downloading the updated tracklist. Please be patient...\n\n'
result= GetNewTrackList()
if not raw_input("Print the list of new tracks? (ENTER - yes/ any character, then ENTER - no) "):
print '\n\n'+result
raw_input('Execution has finished. Press any key...')
| cc0-1.0 | -2,010,492,077,310,553,000 | 24.021277 | 132 | 0.692744 | false |
jingzhehu/udacity_mlnd | P4_Training_a_Smartcab_to_Drive/smartcab/simulator.py | 1 | 8851 | import os
import time
import random
import importlib
class Simulator(object):
"""Simulates agents in a dynamic smartcab environment.
Uses PyGame to display GUI, if available.
"""
colors = {
'black' : ( 0, 0, 0),
'white' : (255, 255, 255),
'red' : (255, 0, 0),
'green' : ( 0, 255, 0),
'blue' : ( 0, 0, 255),
'cyan' : ( 0, 200, 200),
'magenta' : (200, 0, 200),
'yellow' : (255, 255, 0),
'orange' : (255, 128, 0)
}
def __init__(self, env, size=None, update_delay=1.0, display=True):
self.env = env
self.size = size if size is not None else ((self.env.grid_size[0] + 1) * self.env.block_size, (self.env.grid_size[1] + 1) * self.env.block_size)
self.width, self.height = self.size
self.bg_color = self.colors['white']
self.road_width = 5
self.road_color = self.colors['black']
self.quit = False
self.start_time = None
self.current_time = 0.0
self.last_updated = 0.0
self.update_delay = update_delay
self.display = display
if self.display:
try:
self.pygame = importlib.import_module('pygame')
self.pygame.init()
self.screen = self.pygame.display.set_mode(self.size)
self.frame_delay = max(1, int(self.update_delay * 1000)) # delay between GUI frames in ms (min: 1)
self.agent_sprite_size = (32, 32)
self.agent_circle_radius = 10 # radius of circle, when using simple representation
for agent in self.env.agent_states:
agent._sprite = self.pygame.transform.smoothscale(
self.pygame.image.load(os.path.join("images", "car-{}.png".format(agent.color))), self.agent_sprite_size)
agent._sprite_size = (agent._sprite.get_width(), agent._sprite.get_height())
self.font = self.pygame.font.Font(None, 28)
self.paused = False
except ImportError as e:
self.display = False
print "Simulator.__init__(): Unable to import pygame; display disabled.\n{}: {}".format(e.__class__.__name__, e)
except Exception as e:
self.display = False
print "Simulator.__init__(): Error initializing GUI objects; display disabled.\n{}: {}".format(e.__class__.__name__, e)
def run(self, n_trials=1):
self.quit = False
for trial in xrange(n_trials):
# print "Simulator.run(): Trial {}".format(trial) # [debug]
self.env.reset()
self.current_time = 0.0
self.last_updated = 0.0
self.start_time = time.time()
while True:
try:
# Update current time
self.current_time = time.time() - self.start_time
#print "Simulator.run(): current_time = {:.3f}".format(self.current_time)
# Handle GUI events
if self.display:
for event in self.pygame.event.get():
if event.type == self.pygame.QUIT:
self.quit = True
elif event.type == self.pygame.KEYDOWN:
if event.key == 27: # Esc
self.quit = True
elif event.unicode == u' ':
self.paused = True
if self.paused:
self.pause()
# Update environment
if self.current_time - self.last_updated >= self.update_delay:
self.env.step(trial=trial)
self.last_updated = self.current_time
# Render GUI and sleep
if self.display:
self.render()
self.pygame.time.wait(self.frame_delay)
except KeyboardInterrupt:
self.quit = True
finally:
if self.quit or self.env.done:
break
if self.quit:
break
def render(self):
# Clear screen
self.screen.fill(self.bg_color)
# Draw elements
# * Static elements
for road in self.env.roads:
self.pygame.draw.line(self.screen, self.road_color, (road[0][0] * self.env.block_size, road[0][1] * self.env.block_size), (road[1][0] * self.env.block_size, road[1][1] * self.env.block_size), self.road_width)
for intersection, traffic_light in self.env.intersections.iteritems():
self.pygame.draw.circle(self.screen, self.road_color, (intersection[0] * self.env.block_size, intersection[1] * self.env.block_size), 10)
if traffic_light.state: # North-South is open
self.pygame.draw.line(self.screen, self.colors['green'],
(intersection[0] * self.env.block_size, intersection[1] * self.env.block_size - 15),
(intersection[0] * self.env.block_size, intersection[1] * self.env.block_size + 15), self.road_width)
else: # East-West is open
self.pygame.draw.line(self.screen, self.colors['green'],
(intersection[0] * self.env.block_size - 15, intersection[1] * self.env.block_size),
(intersection[0] * self.env.block_size + 15, intersection[1] * self.env.block_size), self.road_width)
# * Dynamic elements
for agent, state in self.env.agent_states.iteritems():
# Compute precise agent location here (back from the intersection some)
agent_offset = (2 * state['heading'][0] * self.agent_circle_radius, 2 * state['heading'][1] * self.agent_circle_radius)
agent_pos = (state['location'][0] * self.env.block_size - agent_offset[0], state['location'][1] * self.env.block_size - agent_offset[1])
agent_color = self.colors[agent.color]
if hasattr(agent, '_sprite') and agent._sprite is not None:
# Draw agent sprite (image), properly rotated
rotated_sprite = agent._sprite if state['heading'] == (1, 0) else self.pygame.transform.rotate(agent._sprite, 180 if state['heading'][0] == -1 else state['heading'][1] * -90)
self.screen.blit(rotated_sprite,
self.pygame.rect.Rect(agent_pos[0] - agent._sprite_size[0] / 2, agent_pos[1] - agent._sprite_size[1] / 2,
agent._sprite_size[0], agent._sprite_size[1]))
else:
# Draw simple agent (circle with a short line segment poking out to indicate heading)
self.pygame.draw.circle(self.screen, agent_color, agent_pos, self.agent_circle_radius)
self.pygame.draw.line(self.screen, agent_color, agent_pos, state['location'], self.road_width)
if agent.get_next_waypoint() is not None:
self.screen.blit(self.font.render(agent.get_next_waypoint(), True, agent_color, self.bg_color), (agent_pos[0] + 10, agent_pos[1] + 10))
if state['destination'] is not None:
self.pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 6)
self.pygame.draw.circle(self.screen, agent_color, (state['destination'][0] * self.env.block_size, state['destination'][1] * self.env.block_size), 15, 2)
# * Overlays
text_y = 10
for text in self.env.status_text.split('\n'):
self.screen.blit(self.font.render(text, True, self.colors['red'], self.bg_color), (100, text_y))
text_y += 20
text_y = 10
time_str = 'time: ' + str(self.env.t)
self.screen.blit(self.font.render(time_str, True, self.colors['red'], self.bg_color), (600, text_y))
# Flip buffers
self.pygame.display.flip()
def pause(self):
abs_pause_time = time.time()
pause_text = "[PAUSED] Press any key to continue..."
self.screen.blit(self.font.render(pause_text, True, self.colors['cyan'], self.bg_color), (100, self.height - 40))
self.pygame.display.flip()
print pause_text # [debug]
while self.paused:
for event in self.pygame.event.get():
if event.type == self.pygame.KEYDOWN:
self.paused = False
self.pygame.time.wait(self.frame_delay)
self.screen.blit(self.font.render(pause_text, True, self.bg_color, self.bg_color), (100, self.height - 40))
self.start_time += (time.time() - abs_pause_time)
| mit | -5,244,304,932,999,362,000 | 49.00565 | 220 | 0.54073 | false |
ctwiz/stardust | qa/rpc-tests/test_framework/key.py | 1 | 7367 | # Copyright (c) 2011 Sam Rushing
#
# key.py - OpenSSL wrapper
#
# This file is modified from python-stardustlib.
#
"""ECC secp256k1 crypto routines
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey(object):
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
return mb_sig.raw[:sig_size0.value]
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
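# NOTE (illustrative usage sketch, not part of the original module): signing and
# verifying a 32-byte message hash with the wrappers above:
#
#     key = CECKey()
#     key.set_secretbytes(hashlib.sha256(b'some secret seed').digest())
#     msg_hash = hashlib.sha256(b'message').digest()
#     sig = key.sign(msg_hash)
#     pub = CPubKey(key.get_pubkey())
#     assert pub.verify(msg_hash, sig)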
| mit | 4,338,475,104,793,338,400 | 33.265116 | 130 | 0.641102 | false |
openstack/oslo.versionedobjects | oslo_versionedobjects/base.py | 1 | 51253 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common internal object model"""
import abc
import collections
from collections import abc as collections_abc
import copy
import functools
import logging
import warnings
import oslo_messaging as messaging
from oslo_utils import excutils
from oslo_utils import versionutils as vutils
from oslo_versionedobjects._i18n import _
from oslo_versionedobjects import exception
from oslo_versionedobjects import fields as obj_fields
LOG = logging.getLogger('object')
class _NotSpecifiedSentinel(object):
pass
def _get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_obj_' + name
def _make_class_properties(cls):
# NOTE(danms/comstud): Inherit fields from super classes.
# mro() returns the current class first and returns 'object' last, so
# those can be skipped. Also be careful to not overwrite any fields
# that already exist. And make sure each cls has its own copy of
# fields and that it is not sharing the dict with a super class.
cls.fields = dict(cls.fields)
for supercls in cls.mro()[1:-1]:
if not hasattr(supercls, 'fields'):
continue
for name, field in supercls.fields.items():
if name not in cls.fields:
cls.fields[name] = field
for name, field in cls.fields.items():
if not isinstance(field, obj_fields.Field):
raise exception.ObjectFieldInvalid(
field=name, objname=cls.obj_name())
def getter(self, name=name):
attrname = _get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, field=field):
attrname = _get_attrname(name)
field_value = field.coerce(self, name, value)
if field.read_only and hasattr(self, attrname):
# Note(yjiang5): _from_db_object() may iterate
# every field and write, no exception in such situation.
if getattr(self, attrname) != field_value:
raise exception.ReadOnlyFieldError(field=name)
else:
return
self._changed_fields.add(name)
try:
return setattr(self, attrname, field_value)
except Exception:
with excutils.save_and_reraise_exception():
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception('Error setting %(attr)s',
{'attr': attr})
def deleter(self, name=name):
attrname = _get_attrname(name)
if not hasattr(self, attrname):
raise AttributeError("No such attribute `%s'" % name)
delattr(self, attrname)
setattr(cls, name, property(getter, setter, deleter))
class VersionedObjectRegistry(object):
_registry = None
def __new__(cls, *args, **kwargs):
if not VersionedObjectRegistry._registry:
VersionedObjectRegistry._registry = object.__new__(
VersionedObjectRegistry, *args, **kwargs)
VersionedObjectRegistry._registry._obj_classes = \
collections.defaultdict(list)
self = object.__new__(cls, *args, **kwargs)
self._obj_classes = VersionedObjectRegistry._registry._obj_classes
return self
def registration_hook(self, cls, index):
pass
def _register_class(self, cls):
def _vers_tuple(obj):
return vutils.convert_version_to_tuple(obj.VERSION)
_make_class_properties(cls)
obj_name = cls.obj_name()
for i, obj in enumerate(self._obj_classes[obj_name]):
self.registration_hook(cls, i)
if cls.VERSION == obj.VERSION:
self._obj_classes[obj_name][i] = cls
break
if _vers_tuple(cls) > _vers_tuple(obj):
# Insert before.
self._obj_classes[obj_name].insert(i, cls)
break
else:
# Either this is the first time we've seen the object or it's
            # an older version than anything we've seen.
self._obj_classes[obj_name].append(cls)
self.registration_hook(cls, 0)
@classmethod
def register(cls, obj_cls):
registry = cls()
registry._register_class(obj_cls)
return obj_cls
@classmethod
def register_if(cls, condition):
def wraps(obj_cls):
if condition:
obj_cls = cls.register(obj_cls)
else:
_make_class_properties(obj_cls)
return obj_cls
return wraps
@classmethod
def objectify(cls, obj_cls):
return cls.register_if(False)(obj_cls)
@classmethod
def obj_classes(cls):
registry = cls()
return registry._obj_classes
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
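# NOTE (illustrative, not part of the original code): typical usage on a
# registered subclass looks like:
#
#     @VersionedObjectRegistry.register
#     class MyObject(VersionedObject):
#         fields = {'foo': obj_fields.IntegerField()}
#
#         @remotable_classmethod
#         def get_by_id(cls, context, obj_id):
#             ...
#
#         @remotable
#         def save(self):
#             ...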
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if cls.indirection_api:
version_manifest = obj_tree_get_versions(cls.obj_name())
try:
result = cls.indirection_api.object_class_action_versions(
context, cls.obj_name(), fn.__name__, version_manifest,
args, kwargs)
except NotImplementedError:
# FIXME(danms): Maybe start to warn here about deprecation?
result = cls.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, VersionedObject):
result._context = context
return result
# NOTE(danms): Make this discoverable
wrapper.remotable = True
wrapper.original_fn = fn
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
ctxt = self._context
if ctxt is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
if self.indirection_api:
updates, result = self.indirection_api.object_action(
ctxt, self, fn.__name__, args, kwargs)
for key, value in updates.items():
if key in self.fields:
field = self.fields[key]
# NOTE(ndipanov): Since VersionedObjectSerializer will have
# deserialized any object fields into objects already,
# we do not try to deserialize them again here.
if isinstance(value, VersionedObject):
setattr(self, key, value)
else:
setattr(self, key,
field.from_primitive(self, key, value))
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, *args, **kwargs)
wrapper.remotable = True
wrapper.original_fn = fn
return wrapper
class VersionedObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
indirection_api = None
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
#
# This version is allowed to have three parts, X.Y.Z, where the .Z element
# is reserved for stable branch backports. The .Z is ignored for the
# purposes of triggering a backport, which means anything changed under
# a .Z must be additive and non-destructive such that a node that knows
# about X.Y can consider X.Y.Z equivalent.
VERSION = '1.0'
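    # NOTE (illustrative, not part of the original code): a subclass usually
    # documents its history next to its VERSION, for example:
    #
    #     class MyObject(VersionedObject):
    #         # Version 1.0: Initial version
    #         # Version 1.1: Added 'bar' field
    #         VERSION = '1.1'
    #
    # Under the rules above, a primitive tagged '1.1.1' is acceptable to a
    # node that only knows '1.1', while '1.2' would trigger a backport.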
# Object namespace for serialization
# NB: Generally this should not be changed, but is needed for backwards
# compatibility
OBJ_SERIAL_NAMESPACE = 'versioned_object'
# Object project namespace for serialization
# This is used to disambiguate owners of objects sharing a common RPC
# medium
OBJ_PROJECT_NAMESPACE = 'versionedobjects'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': obj_fields.IntegerField(),
# 'bar': obj_fields.StringField(),
# }
fields = {}
obj_extra_fields = []
# Table of sub-object versioning information
#
# This contains a list of version mappings, by the field name of
# the subobject. The mappings must be in order of oldest to
# newest, and are tuples of (my_version, subobject_version). A
# request to backport this object to $my_version will cause the
# subobject to be backported to $subobject_version.
#
# obj_relationships = {
# 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
# 'subobject2': [('1.2', '1.0')],
# }
#
# In the above example:
#
# - If we are asked to backport our object to version 1.3,
# subobject1 will be backported to version 1.1, since it was
# bumped to version 1.2 when our version was 1.4.
# - If we are asked to backport our object to version 1.5,
# no changes will be made to subobject1 or subobject2, since
# they have not changed since version 1.4.
# - If we are asked to backlevel our object to version 1.1, we
# will remove both subobject1 and subobject2 from the primitive,
# since they were not added until version 1.2.
obj_relationships = {}
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
repr_str = '%s(%s)' % (
self.obj_name(),
','.join(['%s=%s' % (name,
(self.obj_attr_is_set(name) and
field.stringify(getattr(self, name)) or
'<?>'))
for name, field in sorted(self.fields.items())]))
return repr_str
def __contains__(self, name):
try:
return self.obj_attr_is_set(name)
except AttributeError:
return False
@classmethod
def to_json_schema(cls):
obj_name = cls.obj_name()
schema = {
'$schema': 'http://json-schema.org/draft-04/schema#',
'title': obj_name,
}
schema.update(obj_fields.Object(obj_name).get_schema())
return schema
@classmethod
def obj_name(cls):
"""Return the object's name
Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def _obj_primitive_key(cls, field):
return '%s.%s' % (cls.OBJ_SERIAL_NAMESPACE, field)
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in VersionedObjectRegistry.obj_classes():
            LOG.error('Unable to instantiate unregistered object type '
                      '%(objtype)s', dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
# NOTE(comstud): If there's not an exact match, return the highest
# compatible version. The objects stored in the class are sorted
# such that highest version is first, so only set compatible_match
# once below.
compatible_match = None
for objclass in VersionedObjectRegistry.obj_classes()[objname]:
if objclass.VERSION == objver:
return objclass
if (not compatible_match and
vutils.is_compatible(objver, objclass.VERSION)):
compatible_match = objclass
if compatible_match:
return compatible_match
# As mentioned above, latest version is always first in the list.
latest_ver = VersionedObjectRegistry.obj_classes()[objname][0].VERSION
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = cls._obj_primitive_field(primitive, 'data')
changes = cls._obj_primitive_field(primitive, 'changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
objns = cls._obj_primitive_field(primitive, 'namespace')
objname = cls._obj_primitive_field(primitive, 'name')
objver = cls._obj_primitive_field(primitive, 'version')
if objns != cls.OBJ_PROJECT_NAMESPACE:
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (objns, objname))
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
# NOTE(sskripnick): we should save newly created object into mem
# to let deepcopy know which branches are already created.
# See launchpad bug #1602314 for more details
memo[id(self)] = nobj
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def _obj_relationship_for(self, field, target_version):
# NOTE(danms): We need to be graceful about not having the temporary
# version manifest if called from obj_make_compatible().
if (not hasattr(self, '_obj_version_manifest') or
self._obj_version_manifest is None):
try:
return self.obj_relationships[field]
except KeyError:
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='No rule for %s' % field)
objname = self.fields[field].objname
if objname not in self._obj_version_manifest:
return
# NOTE(danms): Compute a relationship mapping that looks like
# what the caller expects.
return [(target_version, self._obj_version_manifest[objname])]
def _obj_make_obj_compatible(self, primitive, target_version, field):
"""Backlevel a sub-object based on our versioning rules.
This is responsible for backporting objects contained within
this object's primitive according to a set of rules we
maintain about version dependencies between objects. This
requires that the obj_relationships table in this object is
correct and up-to-date.
:param:primitive: The primitive version of this object
:param:target_version: The version string requested for this object
:param:field: The name of the field in this object containing the
sub-object to be backported
"""
relationship_map = self._obj_relationship_for(field, target_version)
if not relationship_map:
# NOTE(danms): This means the field was not specified in the
# version manifest from the client, so it must not want this
# field, so skip.
return
try:
_get_subobject_version(target_version,
relationship_map,
lambda ver: _do_subobject_backport(
ver, self, field, primitive))
except exception.TargetBeforeSubobjectExistedException:
# Subobject did not exist, so delete it from the primitive
del primitive[field]
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc. In
general:
- If a new version of an object adds a field, this routine
should remove it for older versions.
- If a new version changed or restricted the format of a field, this
should convert it back to something a client knowing only of the
older version will tolerate.
- If an object that this object depends on is bumped, then this
object should also take a version bump. Then, this routine should
backlevel the dependent object (by calling its obj_make_compatible())
if the requested version of this object is older than the version
where the new dependent object was added.
:param primitive: The result of :meth:`obj_to_primitive`
:param target_version: The version string requested by the recipient
of the object
:raises: :exc:`oslo_versionedobjects.exception.UnsupportedObjectError`
if conversion is not possible for some reason
"""
for key, field in self.fields.items():
if not isinstance(field, (obj_fields.ObjectField,
obj_fields.ListOfObjectsField)):
continue
if not self.obj_attr_is_set(key):
continue
self._obj_make_obj_compatible(primitive, target_version, key)
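    # NOTE (illustrative sketch, not part of the original code): a subclass
    # that added a 'bar' field in its version 1.1 would typically extend this
    # hook roughly as follows:
    #
    #     def obj_make_compatible(self, primitive, target_version):
    #         super(MyObject, self).obj_make_compatible(primitive,
    #                                                   target_version)
    #         target = vutils.convert_version_to_tuple(target_version)
    #         if target < (1, 1):
    #             # field did not exist before 1.1, so drop it on backport
    #             primitive.pop('bar', None)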
def obj_make_compatible_from_manifest(self, primitive, target_version,
version_manifest):
# NOTE(danms): Stash the manifest on the object so we can use it in
# the deeper layers. We do this because obj_make_compatible() is
# defined library API at this point, yet we need to get this manifest
# to the other bits that get called so we can propagate it to child
# calls. It's not pretty, but a tactical solution. Ideally we will
# either evolve or deprecate obj_make_compatible() in a major version
# bump.
self._obj_version_manifest = version_manifest
try:
return self.obj_make_compatible(primitive, target_version)
finally:
delattr(self, '_obj_version_manifest')
def obj_to_primitive(self, target_version=None, version_manifest=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
if target_version is None:
target_version = self.VERSION
if (vutils.convert_version_to_tuple(target_version) >
vutils.convert_version_to_tuple(self.VERSION)):
raise exception.InvalidTargetVersion(version=target_version)
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
# NOTE(danms): If we know we're being asked for a different version,
# then do the compat step. However, even if we think we're not,
# we may have sub-objects that need it, so if we have a manifest we
# have to traverse this object just in case. Previously, we
# required a parent version bump for any child, so the target
# check was enough.
if target_version != self.VERSION or version_manifest:
self.obj_make_compatible_from_manifest(primitive,
target_version,
version_manifest)
obj = {self._obj_primitive_key('name'): self.obj_name(),
self._obj_primitive_key('namespace'): (
self.OBJ_PROJECT_NAMESPACE),
self._obj_primitive_key('version'): target_version,
self._obj_primitive_key('data'): primitive}
if self.obj_what_changed():
# NOTE(cfriesen): if we're downgrading to a lower version, then
# it's possible that self.obj_what_changed() includes fields that
# no longer exist in the lower version. If so, filter them out.
what_changed = self.obj_what_changed()
changes = [field for field in what_changed if field in primitive]
if changes:
obj[self._obj_primitive_key('changes')] = changes
return obj
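    # NOTE (illustrative, not part of the original code): a typical round trip
    # through the primitive form, assuming a registered MyObject class:
    #
    #     prim = obj.obj_to_primitive(target_version='1.0')
    #     restored = MyObject.obj_from_primitive(prim)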
def obj_set_defaults(self, *attrs):
if not attrs:
attrs = [name for name, field in self.fields.items()
if field.default != obj_fields.UnspecifiedDefault]
for attr in attrs:
default = copy.deepcopy(self.fields[attr].default)
if default is obj_fields.UnspecifiedDefault:
raise exception.ObjectActionError(
action='set_defaults',
reason='No default set for field %s' % attr)
if not self.obj_attr_is_set(attr):
setattr(self, attr, default)
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should load self.$attrname and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError(_('Cannot save anything in the base class'))
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set([field for field in self._changed_fields
if field in self.fields])
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(getattr(self, field), VersionedObject) and
getattr(self, field).obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = getattr(self, key)
return changes
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
This is NOT "revert to previous values".
Specifying fields on recursive resets will only be honored at the top
level. Everything below the top will reset all.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, _get_attrname(attrname))
@property
def obj_fields(self):
return list(self.fields.keys()) + self.obj_extra_fields
@property
def obj_context(self):
return self._context
class ComparableVersionedObject(object):
"""Mix-in to provide comparison methods
When objects are to be compared with each other (in tests for example),
this mixin can be used.
"""
def __eq__(self, obj):
# FIXME(inc0): this can return incorrect value if we consider partially
# loaded objects from db and fields which are dropped out differ
if hasattr(obj, 'obj_to_primitive'):
return self.obj_to_primitive() == obj.obj_to_primitive()
return NotImplemented
def __hash__(self):
return super(ComparableVersionedObject, self).__hash__()
def __ne__(self, obj):
if hasattr(obj, 'obj_to_primitive'):
return self.obj_to_primitive() != obj.obj_to_primitive()
return NotImplemented
class TimestampedObject(object):
"""Mixin class for db backed objects with timestamp fields.
Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class VersionedObjectDictCompat(object):
"""Mix-in to provide dictionary key access compatibility
If an object needs to support attribute access using
dictionary items instead of object attributes, inherit
from this class. This should only be used as a temporary
measure until all callers are converted to use modern
attribute access.
"""
def __iter__(self):
for name in self.obj_fields:
if (self.obj_attr_is_set(name) or
name in self.obj_extra_fields):
yield name
keys = __iter__
def values(self):
for name in self:
yield getattr(self, name)
def items(self):
for name in self:
yield name, getattr(self, name)
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def get(self, key, value=_NotSpecifiedSentinel):
if key not in self.obj_fields:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__, key))
if value != _NotSpecifiedSentinel and not self.obj_attr_is_set(key):
return value
else:
return getattr(self, key)
def update(self, updates):
for key, value in updates.items():
setattr(self, key, value)
class ObjectListBase(collections_abc.Sequence):
"""Mixin class for lists of objects.
This mixin class can be added as a base class for an object that
is implementing a list of objects. It adds a single field of 'objects',
which is the list store, and behaves like a list itself. It supports
serialization of the list of objects automatically.
"""
fields = {
'objects': obj_fields.ListOfObjectsField('VersionedObject'),
}
# This is a dictionary of my_version:child_version mappings so that
# we can support backleveling our contents based on the version
# requested of the list object.
child_versions = {}
def __init__(self, *args, **kwargs):
super(ObjectListBase, self).__init__(*args, **kwargs)
if 'objects' not in kwargs:
self.objects = []
self._changed_fields.discard('objects')
def __len__(self):
"""List length."""
return len(self.objects)
def __getitem__(self, index):
"""List index access."""
if isinstance(index, slice):
new_obj = self.__class__()
new_obj.objects = self.objects[index]
# NOTE(danms): We must be mixed in with a VersionedObject!
new_obj.obj_reset_changes()
new_obj._context = self._context
return new_obj
return self.objects[index]
def sort(self, key=None, reverse=False):
self.objects.sort(key=key, reverse=reverse)
def obj_make_compatible(self, primitive, target_version):
# Give priority to using child_versions, if that isn't set, try
# obj_relationships
if self.child_versions:
relationships = self.child_versions.items()
else:
try:
relationships = self._obj_relationship_for('objects',
target_version)
except exception.ObjectActionError:
# No relationship for this found in manifest or
# in obj_relationships
relationships = {}
try:
# NOTE(rlrossit): If we have no version information, just
# backport to child version 1.0 (maintaining default
# behavior)
if relationships:
_get_subobject_version(target_version, relationships,
lambda ver: _do_subobject_backport(
ver, self, 'objects', primitive))
else:
_do_subobject_backport('1.0', self, 'objects', primitive)
except exception.TargetBeforeSubobjectExistedException:
# Child did not exist, so delete it from the primitive
del primitive['objects']
def obj_what_changed(self):
changes = set(self._changed_fields)
for child in self.objects:
if child.obj_what_changed():
changes.add('objects')
return changes
def __add__(self, other):
# Handling arbitrary fields may not make sense if those fields are not
# all concatenatable. Only concatenate if the base 'objects' field is
# the only one and the classes match.
if (self.__class__ == other.__class__ and
list(self.__class__.fields.keys()) == ['objects']):
return self.__class__(objects=self.objects + other.objects)
else:
raise TypeError("List Objects should be of the same type and only "
"have an 'objects' field")
def __radd__(self, other):
if (self.__class__ == other.__class__ and
list(self.__class__.fields.keys()) == ['objects']):
# This should never be run in practice. If the above condition is
# met then __add__ would have been run.
raise NotImplementedError('__radd__ is not implemented for '
'objects of the same type')
else:
raise TypeError("List Objects should be of the same type and only "
"have an 'objects' field")
class VersionedObjectSerializer(messaging.NoOpSerializer):
"""A VersionedObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize VersionedObject entities. Any service
that needs to accept or return VersionedObjects as arguments or result
values should pass this to its RPCClient and RPCServer objects.
"""
# Base class to use for object hydration
OBJ_BASE_CLASS = VersionedObject
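    # NOTE (illustrative, not part of the original code): a service would
    # typically hand this serializer to its oslo.messaging client and server,
    # e.g.:
    #
    #     serializer = VersionedObjectSerializer()
    #     client = messaging.RPCClient(transport, target,
    #                                  serializer=serializer)
    #     server = messaging.get_rpc_server(transport, target, endpoints,
    #                                       serializer=serializer)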
def _do_backport(self, context, objprim, objclass):
obj_versions = obj_tree_get_versions(objclass.obj_name())
indirection_api = self.OBJ_BASE_CLASS.indirection_api
try:
return indirection_api.object_backport_versions(
context, objprim, obj_versions)
except NotImplementedError:
# FIXME(danms): Maybe start to warn here about deprecation?
return indirection_api.object_backport(context, objprim,
objclass.VERSION)
def _process_object(self, context, objprim):
try:
return self.OBJ_BASE_CLASS.obj_from_primitive(
objprim, context=context)
except exception.IncompatibleObjectVersion:
with excutils.save_and_reraise_exception(reraise=False) as ctxt:
verkey = \
'%s.version' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE
objver = objprim[verkey]
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim[verkey] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
namekey = '%s.name' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE
objname = objprim[namekey]
supported = VersionedObjectRegistry.obj_classes().get(objname,
[])
if self.OBJ_BASE_CLASS.indirection_api and supported:
return self._do_backport(context, objprim, supported[0])
else:
ctxt.reraise = True
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable([(k, action_fn(context, v))
for k, v in values.items()])
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
namekey = '%s.name' % self.OBJ_BASE_CLASS.OBJ_SERIAL_NAMESPACE
if isinstance(entity, dict) and namekey in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
class VersionedObjectIndirectionAPI(object, metaclass=abc.ABCMeta):
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on a VersionedObject instance.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), method calls on remotable methods
will cause this to be executed to actually make the desired
call. This often involves performing RPC.
:param context: The context within which to perform the action
:param objinst: The object instance on which to perform the action
:param objmethod: The name of the action method to call
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:returns: The result of the action method
"""
pass
def object_class_action(self, context, objname, objmethod, objver,
args, kwargs):
""".. deprecated:: 0.10.0
Use :func:`object_class_action_versions` instead.
Perform an action on a VersionedObject class.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), classmethod calls on
remotable_classmethod methods will cause this to be executed to
actually make the desired call. This usually involves performing
RPC.
:param context: The context within which to perform the action
:param objname: The registry name of the object
:param objmethod: The name of the action method to call
:param objver: The (remote) version of the object on which the
action is being taken
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:returns: The result of the action method, which may (or may not)
be an instance of the implementing VersionedObject class.
"""
pass
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
"""Perform an action on a VersionedObject class.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), classmethod calls on
remotable_classmethod methods will cause this to be executed to
actually make the desired call. This usually involves performing
RPC.
This differs from object_class_action() in that it is provided
with object_versions, a manifest of client-side object versions
for easier nested backports. The manifest is the result of
calling obj_tree_get_versions().
NOTE: This was not in the initial spec for this interface, so the
base class raises NotImplementedError if you don't implement it.
For backports, this method will be tried first, and if unimplemented,
will fall back to object_class_action(). New implementations should
provide this method instead of object_class_action()
:param context: The context within which to perform the action
:param objname: The registry name of the object
:param objmethod: The name of the action method to call
:param object_versions: A dict of {objname: version} mappings
:param args: The positional arguments to the action method
:param kwargs: The keyword arguments to the action method
:returns: The result of the action method, which may (or may not)
be an instance of the implementing VersionedObject class.
"""
warnings.warn('object_class_action() is deprecated in favor of '
'object_class_action_versions() and will be removed '
'in a later release', DeprecationWarning)
raise NotImplementedError('Multi-version class action not supported')
def object_backport(self, context, objinst, target_version):
""".. deprecated:: 0.10.0
Use :func:`object_backport_versions` instead.
Perform a backport of an object instance to a specified version.
When indirection_api is set on a VersionedObject (to a class
implementing this interface), the default behavior of the base
VersionedObjectSerializer, upon receiving an object with a version
        newer than what is in the local registry, is to call this method to
        request a backport of the object. In an environment where there is
        an RPC-able service on the bus which can gracefully downgrade newer
        objects for older services, this method serves as a translation
mechanism for older code when receiving objects from newer code.
NOTE: This older/original method is soon to be deprecated. When a
backport is required, the newer object_backport_versions() will be
tried, and if it raises NotImplementedError, then we will fall back
to this (less optimal) method.
:param context: The context within which to perform the backport
:param objinst: An instance of a VersionedObject to be backported
:param target_version: The maximum version of the objinst's class
that is understood by the requesting host.
:returns: The downgraded instance of objinst
"""
pass
def object_backport_versions(self, context, objinst, object_versions):
"""Perform a backport of an object instance.
This method is basically just like object_backport() but instead of
providing a specific target version for the toplevel object and
relying on the service-side mapping to handle sub-objects, this sends
a mapping of all the dependent objects and their client-supported
versions. The server will backport objects within the tree starting
at objinst to the versions specified in object_versions, removing
objects that have no entry. Use obj_tree_get_versions() to generate
this mapping.
NOTE: This was not in the initial spec for this interface, so the
base class raises NotImplementedError if you don't implement it.
For backports, this method will be tried first, and if unimplemented,
will fall back to object_backport().
:param context: The context within which to perform the backport
:param objinst: An instance of a VersionedObject to be backported
:param object_versions: A dict of {objname: version} mappings
"""
warnings.warn('object_backport() is deprecated in favor of '
'object_backport_versions() and will be removed '
'in a later release', DeprecationWarning)
raise NotImplementedError('Multi-version backport not supported')
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The VersionedObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def obj_tree_get_versions(objname, tree=None):
"""Construct a mapping of dependent object versions.
This method builds a list of dependent object versions given a top-
level object with other objects as fields. It walks the tree recursively
to determine all the objects (by symbolic name) that could be contained
within the top-level object, and the maximum versions of each. The result
is a dict like::
{'MyObject': '1.23', ... }
:param objname: The top-level object at which to start
:param tree: Used internally, pass None here.
:returns: A dictionary of object names and versions
"""
if tree is None:
tree = {}
if objname in tree:
return tree
objclass = VersionedObjectRegistry.obj_classes()[objname][0]
tree[objname] = objclass.VERSION
for field_name in objclass.fields:
field = objclass.fields[field_name]
if isinstance(field, obj_fields.ObjectField):
child_cls = field._type._obj_name
elif isinstance(field, obj_fields.ListOfObjectsField):
child_cls = field._type._element_type._type._obj_name
else:
continue
try:
obj_tree_get_versions(child_cls, tree=tree)
except IndexError:
raise exception.UnregisteredSubobject(
child_objname=child_cls, parent_objname=objname)
return tree
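# NOTE (illustrative, not part of the original code): the resulting manifest is
# what callers pass back into serialization/backport paths, e.g.:
#
#     versions = obj_tree_get_versions('MyObject')
#     prim = obj.obj_to_primitive(target_version=versions['MyObject'],
#                                 version_manifest=versions)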
def _get_subobject_version(tgt_version, relationships, backport_func):
"""Get the version to which we need to convert a subobject.
This uses the relationships between a parent and a subobject,
along with the target parent version, to decide the version we need
to convert a subobject to. If the subobject did not exist in the parent at
the target version, TargetBeforeChildExistedException is raised. If there
is a need to backport, backport_func is called and the subobject version
to backport to is passed in.
:param tgt_version: The version we are converting the parent to
:param relationships: A list of (parent, subobject) version tuples
:param backport_func: A backport function that takes in the subobject
version
:returns: The version we need to convert the subobject to
"""
tgt = vutils.convert_version_to_tuple(tgt_version)
for index, versions in enumerate(relationships):
parent, child = versions
parent = vutils.convert_version_to_tuple(parent)
if tgt < parent:
if index == 0:
# We're backporting to a version of the parent that did
# not contain this subobject
raise exception.TargetBeforeSubobjectExistedException(
target_version=tgt_version)
else:
# We're in a gap between index-1 and index, so set the desired
# version to the previous index's version
child = relationships[index - 1][1]
backport_func(child)
return
elif tgt == parent:
# We found the version we want, so backport to it
backport_func(child)
return
def _do_subobject_backport(to_version, parent, field, primitive):
obj = getattr(parent, field)
manifest = (hasattr(parent, '_obj_version_manifest') and
parent._obj_version_manifest or None)
if isinstance(obj, VersionedObject):
obj.obj_make_compatible_from_manifest(
obj._obj_primitive_field(primitive[field], 'data'),
to_version, version_manifest=manifest)
ver_key = obj._obj_primitive_key('version')
primitive[field][ver_key] = to_version
elif isinstance(obj, list):
for i, element in enumerate(obj):
element.obj_make_compatible_from_manifest(
element._obj_primitive_field(primitive[field][i], 'data'),
to_version, version_manifest=manifest)
ver_key = element._obj_primitive_key('version')
primitive[field][i][ver_key] = to_version
| apache-2.0 | -5,300,163,210,659,548,000 | 41.287954 | 79 | 0.612491 | false |
ruleant/buildtime-trend | buildtimetrend/collection.py | 1 | 2160 | # vim: set expandtab sw=4 ts=4:
'''
Dictionary based collection class.
Copyright (C) 2014 Dieter Adriaenssens <[email protected]>
This file is part of buildtime-trend
<https://github.com/ruleant/buildtime-trend/>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import copy
from buildtimetrend.tools import check_dict
class Collection(object):
'''
Dictionary based collection object.
'''
def __init__(self):
self.items = {}
def add_item(self, name, value):
'''
Add an item to the collection
Parameters :
- name : Item name
- value : Item value
'''
self.items[name] = value
def get_item(self, name):
'''
Get an item from a collection
Parameters :
- name : Item name
'''
if name in self.items:
return self.items[name]
else:
return None
def get_size(self):
'''
Get the number of items in the collection
'''
return len(self.items)
def add_items(self, items_dict):
'''
Add items as a dictionary to the collection
Parameters:
- items_dict : dictionary with items
'''
if check_dict(items_dict, "items_dict"):
# append dictionary with items to the existing collection
self.items.update(items_dict)
def get_items(self):
'''
Return items collection as dictionary
'''
# copy values of items collection
return copy.deepcopy(self.items)
| gpl-3.0 | -7,609,691,097,847,600,000 | 25.024096 | 70 | 0.630556 | false |
henkhaus/wow | testing/plotter.py | 1 | 1278 | from pymongo import MongoClient
from matplotlib import pyplot as plt
import os
from datetime import datetime, date, time, timedelta
client = MongoClient()
# using wowtest.auctiondata
db = client.wowtest
posts = db.auctiondata
auctions = posts.find().limit(10)
#time.time() into datetime --->
#datetime.datetime.fromtimestamp('xxxx').strftime('%c')
def dt_to_timestamp(dt):
#timestamp = (dt - datetime(1970, 1, 1)).total_seconds()
return (int(dt.strftime('%s')))
def getdata(num, quantum):
valid = []
today = datetime.combine(date.today(), time())
for i in range(num+1):
day = today - i*quantum
gte = dt_to_timestamp(day)
lt = dt_to_timestamp(day+quantum)
time_query = {'$gte':gte, '$lt':lt}
valid.insert(0, posts.find({'viewtime':time_query}).count())
return valid
def format_date(x, n):
today = datetime.combine(date.today(), time())
day = today - timedelta(hours=n-x-1)
return day.strftime('%m%d%H')
def plotbar(data, color):
plt.bar(range(len(data)), data, align='center', color=color)
# run
n = 48
val = getdata(n, timedelta(hours=1))
plotbar(val, '#4788d2')
plt.xticks(range(n), [format_date(i, n) for i in range(n)], size='small', rotation=90)
plt.grid(axis='y')
plt.show()
| apache-2.0 | 1,368,956,586,611,997,000 | 22.666667 | 86 | 0.649452 | false |
davy39/eric | Graphics/UMLDialog.py | 1 | 14316 | # -*- coding: utf-8 -*-
# Copyright (c) 2007 - 2014 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog showing UML like diagrams.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot, Qt, QFileInfo
from PyQt5.QtWidgets import QAction, QToolBar, QGraphicsScene
from E5Gui import E5MessageBox, E5FileDialog
from E5Gui.E5MainWindow import E5MainWindow
import UI.Config
import UI.PixmapCache
class UMLDialog(E5MainWindow):
"""
Class implementing a dialog showing UML like diagrams.
"""
NoDiagram = 255
ClassDiagram = 0
PackageDiagram = 1
ImportsDiagram = 2
ApplicationDiagram = 3
FileVersions = ["1.0"]
def __init__(self, diagramType, project, path="", parent=None,
initBuilder=True, **kwargs):
"""
Constructor
@param diagramType type of the diagram (one of ApplicationDiagram,
ClassDiagram, ImportsDiagram, NoDiagram, PackageDiagram)
@param project reference to the project object (Project)
@param path file or directory path to build the diagram from (string)
@param parent parent widget of the dialog (QWidget)
@keyparam initBuilder flag indicating to initialize the diagram
builder (boolean)
@keyparam kwargs diagram specific data
"""
super(UMLDialog, self).__init__(parent)
self.setObjectName("UMLDialog")
self.__diagramType = diagramType
self.__project = project
from .UMLGraphicsView import UMLGraphicsView
self.scene = QGraphicsScene(0.0, 0.0, 800.0, 600.0)
self.umlView = UMLGraphicsView(self.scene, parent=self)
self.builder = self.__diagramBuilder(
self.__diagramType, path, **kwargs)
if self.builder and initBuilder:
self.builder.initialize()
self.__fileName = ""
self.__initActions()
self.__initToolBars()
self.setCentralWidget(self.umlView)
self.umlView.relayout.connect(self.__relayout)
self.setWindowTitle(self.__diagramTypeString())
def __initActions(self):
"""
Private slot to initialize the actions.
"""
self.closeAct = \
QAction(UI.PixmapCache.getIcon("close.png"),
self.tr("Close"), self)
self.closeAct.triggered.connect(self.close)
self.openAct = \
QAction(UI.PixmapCache.getIcon("open.png"),
self.tr("Load"), self)
self.openAct.triggered.connect(self.load)
self.saveAct = \
QAction(UI.PixmapCache.getIcon("fileSave.png"),
self.tr("Save"), self)
self.saveAct.triggered.connect(self.__save)
self.saveAsAct = \
QAction(UI.PixmapCache.getIcon("fileSaveAs.png"),
self.tr("Save As..."), self)
self.saveAsAct.triggered.connect(self.__saveAs)
self.saveImageAct = \
QAction(UI.PixmapCache.getIcon("fileSavePixmap.png"),
self.tr("Save as Image"), self)
self.saveImageAct.triggered.connect(self.umlView.saveImage)
self.printAct = \
QAction(UI.PixmapCache.getIcon("print.png"),
self.tr("Print"), self)
self.printAct.triggered.connect(self.umlView.printDiagram)
self.printPreviewAct = \
QAction(UI.PixmapCache.getIcon("printPreview.png"),
self.tr("Print Preview"), self)
self.printPreviewAct.triggered.connect(
self.umlView.printPreviewDiagram)
def __initToolBars(self):
"""
Private slot to initialize the toolbars.
"""
self.windowToolBar = QToolBar(self.tr("Window"), self)
self.windowToolBar.setIconSize(UI.Config.ToolBarIconSize)
self.windowToolBar.addAction(self.closeAct)
self.fileToolBar = QToolBar(self.tr("File"), self)
self.fileToolBar.setIconSize(UI.Config.ToolBarIconSize)
self.fileToolBar.addAction(self.openAct)
self.fileToolBar.addSeparator()
self.fileToolBar.addAction(self.saveAct)
self.fileToolBar.addAction(self.saveAsAct)
self.fileToolBar.addAction(self.saveImageAct)
self.fileToolBar.addSeparator()
self.fileToolBar.addAction(self.printPreviewAct)
self.fileToolBar.addAction(self.printAct)
self.umlToolBar = self.umlView.initToolBar()
self.addToolBar(Qt.TopToolBarArea, self.fileToolBar)
self.addToolBar(Qt.TopToolBarArea, self.windowToolBar)
self.addToolBar(Qt.TopToolBarArea, self.umlToolBar)
def show(self, fromFile=False):
"""
Public method to show the dialog.
@keyparam fromFile flag indicating that the diagram was loaded
from file (boolean)
"""
if not fromFile and self.builder:
self.builder.buildDiagram()
super(UMLDialog, self).show()
def __relayout(self):
"""
Private method to relayout the diagram.
"""
if self.builder:
self.builder.buildDiagram()
def __diagramBuilder(self, diagramType, path, **kwargs):
"""
Private method to instantiate a diagram builder object.
@param diagramType type of the diagram
(one of ApplicationDiagram, ClassDiagram, ImportsDiagram,
PackageDiagram)
@param path file or directory path to build the diagram from (string)
@keyparam kwargs diagram specific data
@return reference to the instantiated diagram builder
@exception ValueError raised to indicate an illegal diagram type
"""
if diagramType == UMLDialog.ClassDiagram:
from .UMLClassDiagramBuilder import UMLClassDiagramBuilder
return UMLClassDiagramBuilder(
self, self.umlView, self.__project, path, **kwargs)
elif diagramType == UMLDialog.PackageDiagram:
from .PackageDiagramBuilder import PackageDiagramBuilder
return PackageDiagramBuilder(
self, self.umlView, self.__project, path, **kwargs)
elif diagramType == UMLDialog.ImportsDiagram:
from .ImportsDiagramBuilder import ImportsDiagramBuilder
return ImportsDiagramBuilder(
self, self.umlView, self.__project, path, **kwargs)
elif diagramType == UMLDialog.ApplicationDiagram:
from .ApplicationDiagramBuilder import ApplicationDiagramBuilder
return ApplicationDiagramBuilder(
self, self.umlView, self.__project, **kwargs)
elif diagramType == UMLDialog.NoDiagram:
return None
else:
raise ValueError(self.tr(
"Illegal diagram type '{0}' given.").format(diagramType))
def __diagramTypeString(self):
"""
Private method to generate a readable string for the diagram type.
@return readable type string (string)
"""
if self.__diagramType == UMLDialog.ClassDiagram:
return "Class Diagram"
elif self.__diagramType == UMLDialog.PackageDiagram:
return "Package Diagram"
elif self.__diagramType == UMLDialog.ImportsDiagram:
return "Imports Diagram"
elif self.__diagramType == UMLDialog.ApplicationDiagram:
return "Application Diagram"
else:
return "Illegal Diagram Type"
def __save(self):
"""
Private slot to save the diagram with the current name.
"""
self.__saveAs(self.__fileName)
@pyqtSlot()
def __saveAs(self, filename=""):
"""
Private slot to save the diagram.
@param filename name of the file to write to (string)
"""
if not filename:
fname, selectedFilter = E5FileDialog.getSaveFileNameAndFilter(
self,
self.tr("Save Diagram"),
"",
self.tr("Eric Graphics File (*.e5g);;All Files (*)"),
"",
E5FileDialog.Options(E5FileDialog.DontConfirmOverwrite))
if not fname:
return
ext = QFileInfo(fname).suffix()
if not ext:
ex = selectedFilter.split("(*")[1].split(")")[0]
if ex:
fname += ex
if QFileInfo(fname).exists():
res = E5MessageBox.yesNo(
self,
self.tr("Save Diagram"),
self.tr("<p>The file <b>{0}</b> already exists."
" Overwrite it?</p>").format(fname),
icon=E5MessageBox.Warning)
if not res:
return
filename = fname
lines = [
"version: 1.0",
"diagram_type: {0} ({1})".format(
self.__diagramType, self.__diagramTypeString()),
"scene_size: {0};{1}".format(self.scene.width(),
self.scene.height()),
]
persistenceData = self.builder.getPersistenceData()
if persistenceData:
lines.append("builder_data: {0}".format(persistenceData))
lines.extend(self.umlView.getPersistenceData())
try:
f = open(filename, "w", encoding="utf-8")
f.write("\n".join(lines))
f.close()
except (IOError, OSError) as err:
E5MessageBox.critical(
self,
self.tr("Save Diagram"),
self.tr(
"""<p>The file <b>{0}</b> could not be saved.</p>"""
"""<p>Reason: {1}</p>""").format(filename, str(err)))
return
self.__fileName = filename
def load(self):
"""
Public method to load a diagram from a file.
@return flag indicating success (boolean)
"""
filename = E5FileDialog.getOpenFileName(
self,
self.tr("Load Diagram"),
"",
self.tr("Eric Graphics File (*.e5g);;All Files (*)"))
if not filename:
# Cancelled by user
return False
try:
f = open(filename, "r", encoding="utf-8")
data = f.read()
f.close()
except (IOError, OSError) as err:
E5MessageBox.critical(
self,
self.tr("Load Diagram"),
self.tr(
"""<p>The file <b>{0}</b> could not be read.</p>"""
"""<p>Reason: {1}</p>""").format(filename, str(err)))
return False
lines = data.splitlines()
if len(lines) < 3:
self.__showInvalidDataMessage(filename)
return False
try:
# step 1: check version
linenum = 0
key, value = lines[linenum].split(": ", 1)
if key.strip() != "version" or \
value.strip() not in UMLDialog.FileVersions:
self.__showInvalidDataMessage(filename, linenum)
return False
else:
version = value
# step 2: extract diagram type
linenum += 1
key, value = lines[linenum].split(": ", 1)
if key.strip() != "diagram_type":
self.__showInvalidDataMessage(filename, linenum)
return False
try:
self.__diagramType = int(value.strip().split(None, 1)[0])
except ValueError:
self.__showInvalidDataMessage(filename, linenum)
return False
self.scene.clear()
self.builder = self.__diagramBuilder(self.__diagramType, "")
# step 3: extract scene size
linenum += 1
key, value = lines[linenum].split(": ", 1)
if key.strip() != "scene_size":
self.__showInvalidDataMessage(filename, linenum)
return False
try:
width, height = [float(v.strip()) for v in value.split(";")]
except ValueError:
self.__showInvalidDataMessage(filename, linenum)
return False
self.umlView.setSceneSize(width, height)
# step 4: extract builder data if available
linenum += 1
key, value = lines[linenum].split(": ", 1)
if key.strip() == "builder_data":
ok = self.builder.parsePersistenceData(version, value)
if not ok:
self.__showInvalidDataMessage(filename, linenum)
return False
linenum += 1
# step 5: extract the graphics items
ok, vlinenum = self.umlView.parsePersistenceData(
version, lines[linenum:])
if not ok:
self.__showInvalidDataMessage(filename, linenum + vlinenum)
return False
except IndexError:
self.__showInvalidDataMessage(filename)
return False
# everything worked fine, so remember the file name
self.__fileName = filename
return True
def __showInvalidDataMessage(self, filename, linenum=-1):
"""
Private slot to show a message dialog indicating an invalid data file.
@param filename name of the file containing the invalid data (string)
@param linenum number of the invalid line (integer)
"""
if linenum < 0:
msg = self.tr("""<p>The file <b>{0}</b> does not contain"""
""" valid data.</p>""").format(filename)
else:
msg = self.tr("""<p>The file <b>{0}</b> does not contain"""
""" valid data.</p><p>Invalid line: {1}</p>"""
).format(filename, linenum + 1)
E5MessageBox.critical(self, self.tr("Load Diagram"), msg)
| gpl-3.0 | 6,361,713,874,534,443,000 | 36.37859 | 78 | 0.551132 | false |
kylefrost/budget | main.py | 1 | 1783 | from flask import Flask, render_template, request, redirect
from sql import select
# Create Flask app
app = Flask(__name__)
# API Blueprint
from api import api
app.register_blueprint(api, url_prefix="/api")
# Load Index page
@app.route("/")
def index():
return render_template("index.html")
# --------------- BILLS --------------- #
# Bills page
@app.route("/bills")
def bills():
bills = select("bills")
return render_template("bills.html", bills=bills)
# Add Bill page
@app.route("/bills/add")
def bills_add():
return render_template("bills_add.html")
# Edit Bill page
@app.route("/bills/edit")
def bills_edit():
return render_template("bills_edit.html")
# --------------- SPENDING --------------- #
# Spending page
@app.route("/spending")
def spending():
spending = select("spending")
return render_template("spending.html", spending=spending)
# Add Spending page
@app.route("/spending/add")
def spending_add():
accounts = select("accounts")
return render_template("spending_add.html", accounts=accounts)
# Edit Spending page
@app.route("/spending/edit")
def spending_edit():
return render_template("spending_edit.html")
# --------------- ACCOUNTS --------------- #
# Accounts page
@app.route("/accounts")
def accounts():
accounts = select("accounts")
return render_template("accounts.html", accounts=accounts)
# Add Account page
@app.route("/accounts/add")
def accounts_add():
return render_template("accounts_add.html")
# Edit Account page
@app.route("/accounts/edit")
def accounts_edit():
return render_template("accounts_edit.html")
# Run Flask app on load
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0")
| gpl-3.0 | -3,901,554,615,492,134,400 | 21.155844 | 66 | 0.621985 | false |
ApptuitAI/xcollector | collectors/etc/metric_naming.py | 1 | 1357 | #!/usr/bin/env python
def print_if_apptuit_standard_metric(metric, mapping, timestamp, value, tags=None, tags_str=None):
if metric not in list(mapping["metrics"].keys()):
return
new_metric_name = mapping["metrics"][metric]["standard_name"]
new_metric_tags_str = ""
if tags is not None or tags_str is not None or "tags" in mapping or "tags" in mapping["metrics"][metric]:
new_metric_tags = {}
if tags is not None:
for tag in tags:
new_metric_tags[tag] = tags[tag]
if "tags" in mapping:
for tag in mapping["tags"]:
new_metric_tags[tag] = mapping["tags"][tag]
if "tags" in mapping["metrics"][metric]:
for tag in mapping["metrics"][metric]["tags"]:
new_metric_tags[tag] = mapping["metrics"][metric]["tags"][tag]
for i, tag in enumerate(new_metric_tags):
if i != len(new_metric_tags) - 1:
new_metric_tags_str += tag + "=" + new_metric_tags[tag] + " "
else:
new_metric_tags_str += tag + "=" + new_metric_tags[tag]
if tags_str is not None:
new_metric_tags_str = new_metric_tags_str.strip()
new_metric_tags_str += " " + tags_str.strip()
print("%s %d %s %s"
% (new_metric_name, timestamp, value, new_metric_tags_str))
| lgpl-3.0 | -2,912,226,242,471,547,400 | 47.464286 | 109 | 0.559322 | false |
kiyukuta/chainer | chainer/links/connection/n_step_rnn.py | 1 | 10731 | import numpy
import six
from chainer import cuda
from chainer.functions.array import permutate
from chainer.functions.array import transpose_sequence
from chainer.functions.connection import n_step_rnn as rnn
from chainer import link
from chainer.utils import argument
from chainer import variable
def argsort_list_descent(lst):
return numpy.argsort([-len(x.data) for x in lst]).astype('i')
def permutate_list(lst, indices, inv):
ret = [None] * len(lst)
if inv:
for i, ind in enumerate(indices):
ret[ind] = lst[i]
else:
for i, ind in enumerate(indices):
ret[i] = lst[ind]
return ret
class NStepRNNBase(link.ChainList):
"""__init__(self, n_layers, in_size, out_size, dropout, use_bi_direction, activation)
Base link class for Stacked RNN/BiRNN links.
This link is the base link class for :func:`chainer.links.NStepRNN` and
:func:`chainer.links.NStepBiRNN`.
This link's behavior depends on the argument ``use_bi_direction``.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
use_bi_direction (bool): if ``True``, use Bi-directional RNN.
if ``False``, use Uni-directional RNN.
activation (str): Activation function name.
Please select ``tanh`` or ``relu``.
.. seealso::
:func:`chainer.links.NStepRNNReLU`
:func:`chainer.links.NStepRNNTanh`
:func:`chainer.links.NStepBiRNNReLU`
:func:`chainer.links.NStepBiRNNTanh`
""" # NOQA
def __init__(self, n_layers, in_size, out_size, dropout,
use_bi_direction, activation, **kwargs):
argument.check_unexpected_kwargs(
kwargs, use_cudnn='use_cudnn argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
weights = []
direction = 2 if use_bi_direction else 1
for i in six.moves.range(n_layers):
for di in six.moves.range(direction):
weight = link.Link()
for j in six.moves.range(2):
if i == 0 and j < 1:
w_in = in_size
elif i > 0 and j < 1:
w_in = out_size * direction
else:
w_in = out_size
weight.add_param('w%d' % j, (out_size, w_in))
weight.add_param('b%d' % j, (out_size,))
getattr(weight, 'w%d' % j).data[...] = numpy.random.normal(
0, numpy.sqrt(1. / w_in), (out_size, w_in))
getattr(weight, 'b%d' % j).data[...] = 0
weights.append(weight)
super(NStepRNNBase, self).__init__(*weights)
self.n_layers = n_layers
self.dropout = dropout
self.activation = activation
self.out_size = out_size
self.direction = direction
self.rnn = rnn.n_step_birnn if use_bi_direction else rnn.n_step_rnn
def init_hx(self, xs):
shape = (self.n_layers * self.direction, len(xs), self.out_size)
with cuda.get_device_from_id(self._device_id):
hx = variable.Variable(self.xp.zeros(shape, dtype=xs[0].dtype))
return hx
def __call__(self, hx, xs, **kwargs):
"""__call__(self, hx, xs)
Calculate all hidden states.
.. warning::
``train`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('train', train)``.
See :func:`chainer.using_config`.
Args:
hx (~chainer.Variable or None): Initial hidden states. If ``None``
is specified zero-vector is used.
xs (list of ~chainer.Variable): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence.
"""
argument.check_unexpected_kwargs(
kwargs, train='train argument is not supported anymore. '
'Use chainer.using_config')
argument.assert_kwargs_empty(kwargs)
assert isinstance(xs, (list, tuple))
indices = argsort_list_descent(xs)
xs = permutate_list(xs, indices, inv=False)
if hx is None:
hx = self.init_hx(xs)
else:
hx = permutate.permutate(hx, indices, axis=1, inv=False)
trans_x = transpose_sequence.transpose_sequence(xs)
ws = [[w.w0, w.w1] for w in self]
bs = [[w.b0, w.b1] for w in self]
hy, trans_y = self.rnn(
self.n_layers, self.dropout, hx, ws, bs, trans_x,
activation=self.activation)
hy = permutate.permutate(hy, indices, axis=1, inv=True)
ys = transpose_sequence.transpose_sequence(trans_y)
ys = permutate_list(ys, indices, inv=True)
return hy, ys
class NStepRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
This link is a stacked version of Uni-directional RNN for sequences.
Note that the activation function is ``tanh``.
It calculates the hidden states of all layers at end-of-string,
and all hidden states of the last layer for each time step.
Unlike :func:`chainer.functions.n_step_rnn`, this link automatically
sorts inputs in descending order by length, and transposes the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
"""
def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
NStepRNNBase.__init__(
self, n_layers, in_size, out_size, dropout,
use_bi_direction=False, activation='tanh', **kwargs)
class NStepRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Uni-directional RNN for sequences.
This link is a stacked version of Uni-directional RNN for sequences.
Note that the activation function is ``relu``.
It calculates the hidden states of all layers at end-of-string,
and all hidden states of the last layer for each time step.
Unlike :func:`chainer.functions.n_step_rnn`, this link automatically
sorts inputs in descending order by length, and transposes the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_rnn`
"""
def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
NStepRNNBase.__init__(
self, n_layers, in_size, out_size, dropout,
use_bi_direction=False, activation='relu', **kwargs)
class NStepBiRNNTanh(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
This link is a stacked version of Bi-directional RNN for sequences.
Note that the activation function is ``tanh``.
It calculates the hidden states of all layers at end-of-string,
and all hidden states of the last layer for each time step.
Unlike :func:`chainer.functions.n_step_birnn`, this link automatically
sorts inputs in descending order by length, and transposes the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
NStepRNNBase.__init__(
self, n_layers, in_size, out_size, dropout,
use_bi_direction=True, activation='tanh', **kwargs)
class NStepBiRNNReLU(NStepRNNBase):
"""__init__(self, n_layers, in_size, out_size, dropout)
Stacked Bi-directional RNN for sequences.
This link is a stacked version of Bi-directional RNN for sequences.
Note that the activation function is ``relu``.
It calculates the hidden states of all layers at end-of-string,
and all hidden states of the last layer for each time step.
Unlike :func:`chainer.functions.n_step_birnn`, this link automatically
sorts inputs in descending order by length, and transposes the sequence.
Users just need to call the link with a list of :class:`chainer.Variable`
holding sequences.
.. warning::
``use_cudnn`` argument is not supported anymore since v2.
Instead, use ``chainer.using_config('use_cudnn', use_cudnn)``.
See :func:`chainer.using_config`.
Args:
n_layers (int): Number of layers.
in_size (int): Dimensionality of input vectors.
out_size (int): Dimensionality of hidden states and output vectors.
dropout (float): Dropout ratio.
.. seealso::
:func:`chainer.functions.n_step_birnn`
"""
def __init__(self, n_layers, in_size, out_size, dropout, **kwargs):
NStepRNNBase.__init__(
self, n_layers, in_size, out_size, dropout,
use_bi_direction=True, activation='relu', **kwargs)
| mit | -8,108,629,455,502,223,000 | 34.651163 | 89 | 0.619514 | false |
kubernetes-client/python | kubernetes/client/models/v2beta2_resource_metric_source.py | 1 | 4559 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V2beta2ResourceMetricSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'target': 'V2beta2MetricTarget'
}
attribute_map = {
'name': 'name',
'target': 'target'
}
def __init__(self, name=None, target=None, local_vars_configuration=None): # noqa: E501
"""V2beta2ResourceMetricSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._target = None
self.discriminator = None
self.name = name
self.target = target
@property
def name(self):
"""Gets the name of this V2beta2ResourceMetricSource. # noqa: E501
name is the name of the resource in question. # noqa: E501
:return: The name of this V2beta2ResourceMetricSource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V2beta2ResourceMetricSource.
name is the name of the resource in question. # noqa: E501
:param name: The name of this V2beta2ResourceMetricSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def target(self):
"""Gets the target of this V2beta2ResourceMetricSource. # noqa: E501
:return: The target of this V2beta2ResourceMetricSource. # noqa: E501
:rtype: V2beta2MetricTarget
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this V2beta2ResourceMetricSource.
:param target: The target of this V2beta2ResourceMetricSource. # noqa: E501
:type: V2beta2MetricTarget
"""
if self.local_vars_configuration.client_side_validation and target is None: # noqa: E501
raise ValueError("Invalid value for `target`, must not be `None`") # noqa: E501
self._target = target
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V2beta2ResourceMetricSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V2beta2ResourceMetricSource):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 | -5,506,228,094,858,839,000 | 29.393333 | 124 | 0.58741 | false |
nico202/pyNeMo | plugins/images/membrane.py | 1 | 1385 | #Save spiking:
saveSourceImage(output_firings, '.' + general_config_hash + config_hash + '.png')
for neuron in to_save:
stims = []
n = 0
for n in range(0, general_config.steps):
if n in config.step_input:
found = False
for s in config.step_input[n]:
if s[0] == neuron:
stims.append(s[1])
found = True
if not found:
stims.append(0)
else:
stims.append(0)
saveRawImage(membraneImage((membrane_output[neuron], stims), title = config.name + ' - neuron ' + str(neuron)),
'.' + general_config_hash + config_hash + '_membrane' + str(neuron) + '.png')
for neuron in to_save:
saveRawImage(membraneImage((membrane_output[neuron], stims), close = False, title = config.name),
'.' + general_config_hash + config_hash + '_membrane' + str(neuron) + '_Mixed.png', close = False)
#TODO: add "angles" (is a list of lists of angles for every sensory neuron)
saveKey(general_config_hash + config_hash + '_angles', angles)
print("Output file is: %s" % (general_config._history_dir + '/' + '.' + general_config_hash + config_hash))
#Show spiking:
showSourceImage("." + general_config_hash + config_hash + '.png')
for neuron in to_save:
showSourceImage('.' + general_config_hash + config_hash + '_membrane' + str(neuron) + '.png')
| gpl-2.0 | -7,114,411,268,899,487,000 | 46.758621 | 115 | 0.6 | false |
danielfrg/ec2hosts | ec2hosts/cli.py | 1 | 1208 | from __future__ import print_function, absolute_import, division
import sys
import click
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
import ec2hosts
def main():
try:
cli(obj={})
except Exception as e:
import traceback
click.echo(traceback.format_exc(), err=True)
sys.exit(1)
@click.group(invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name='ec2hosts', version=ec2hosts.__version__)
@click.pass_context
def cli(ctx):
ctx.obj = {}
if ctx.invoked_subcommand is None:
ctx.invoke(run)
@cli.command(short_help='Run')
@click.pass_context
def run(ctx):
click.echo("New /etc/hosts file:")
content = ec2hosts.gen_file()
click.echo(content)
if click.confirm('Do you want to continue?'):
ec2hosts.write(content)
ec2hosts.move()
@cli.command(short_help='Clean')
@click.pass_context
def clean(ctx):
click.echo("New /etc/hosts file:")
content = ec2hosts.clean(ec2hosts.read_file())
click.echo(content)
if click.confirm('Do you want to continue?'):
ec2hosts.write(content)
ec2hosts.move()
| apache-2.0 | 8,346,817,316,003,575,000 | 23.16 | 81 | 0.663907 | false |
muggot/python-goose | tests/tests.py | 1 | 24622 | # -*- coding: utf-8 -*-
"""\
This is a Python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import goose
from goose.utils import FileHelper
from goose.Goose import Goose
from goose.Article import Article
from goose.parsers import Parser
from goose.Configuration import Configuration
import unittest
import pprint
class TestParser(unittest.TestCase):
def getHtml(self, filename):
return FileHelper.loadResourceFile(filename)
def test_childNodesWithText(self):
html = '<html><body>'
html += '<p>this is a test <a class="link">link</a> and this is <strong class="link">strong</strong></p>'
html += '<p>this is a test and this is <strong class="link">strong</strong></p>'
html += '</body></html>'
doc = Parser.fromstring(html)
p = Parser.getElementsByTag(doc, tag='p')[0]
def test_replacetag(self):
html = self.getHtml('parser/test1.html')
doc = Parser.fromstring(html)
# replace all p with div
ps = Parser.getElementsByTag(doc, tag='p')
divs = Parser.getElementsByTag(doc, tag='div')
pcount = len(ps)
divcount = len(divs)
for p in ps:
Parser.replaceTag(p, 'div')
divs2 = Parser.getElementsByTag(doc, tag='div')
divcount2 = len(divs2)
self.assertEqual(divcount2, pcount + divcount)
# replace first div span with center
spans = Parser.getElementsByTag(doc, tag='span')
spanscount = len(spans)
div = Parser.getElementsByTag(doc, tag='div')[0]
span = Parser.getElementsByTag(div, tag='span')
self.assertEqual(len(span), 1)
Parser.replaceTag(span[0], 'center')
span = Parser.getElementsByTag(div, tag='span')
self.assertEqual(len(span), 0)
centers = Parser.getElementsByTag(div, tag='center')
self.assertEqual(len(centers), 1)
def test_tostring(self):
html = '<html><body>'
html += '<p>this is a test <a>link</a> and this is <strong>strong</strong></p>'
html += '</body></html>'
doc = Parser.fromstring(html)
result = Parser.nodeToString(doc)
self.assertEqual(html, result)
def test_striptags(self):
html = '<html><body>'
html += '<p>this is a test <a>link</a> and this is <strong>strong</strong></p>'
html += '</body></html>'
expected = '<html><body>'
expected += '<p>this is a test link and this is strong</p>'
expected += '</body></html>'
doc = Parser.fromstring(html)
Parser.stripTags(doc, 'a', 'strong')
result = Parser.nodeToString(doc)
self.assertEqual(expected, result)
def test_getElementsByTags(self):
html = '<html><body>'
html += '<p>this is a test <a class="link">link</a> and this is <strong class="link">strong</strong></p>'
html += '<p>this is a test and this is <strong class="link">strong</strong></p>'
html += '</body></html>'
doc = Parser.fromstring(html)
elements = Parser.getElementsByTags(doc, ['p', 'a', 'strong'])
self.assertEqual(len(elements), 5)
# find children within the first p
p = Parser.getElementsByTag(doc, tag='p')[0]
elements = Parser.getElementsByTags(p, ['p', 'a', 'strong'])
self.assertEqual(len(elements), 2)
def test_getElementsByTag(self):
html = '<html><body>'
html += '<p>this is a test <a>link</a> and this is <strong>strong</strong></p>'
html += '</body></html>'
doc = Parser.fromstring(html)
# find all tags
elements = Parser.getElementsByTag(doc)
self.assertEqual(len(elements), 5)
# find all p
elements = Parser.getElementsByTag(doc, tag='p')
self.assertEqual(len(elements), 1)
html = '<html><body>'
html += '<p>this is a test <a class="link classB classc">link</a> and this is <strong class="link">strong</strong></p>'
html += '<p>this is a test and this is <strong class="Link">strong</strong></p>'
html += '</body></html>'
doc = Parser.fromstring(html)
# find all p
elements = Parser.getElementsByTag(doc, tag='p')
self.assertEqual(len(elements), 2)
# find all a
elements = Parser.getElementsByTag(doc, tag='a')
self.assertEqual(len(elements), 1)
# find all strong
elements = Parser.getElementsByTag(doc, tag='strong')
self.assertEqual(len(elements), 2)
# find first p
# and find strong elements within the p
elem = Parser.getElementsByTag(doc, tag='p')[0]
elements = Parser.getElementsByTag(elem, tag='strong')
self.assertEqual(len(elements), 1)
# test if the first p is taken into account
elem = Parser.getElementsByTag(doc, tag='p')[0]
elements = Parser.getElementsByTag(elem, tag='p')
self.assertEqual(len(elements), 0)
# find elem with class "link"
elements = Parser.getElementsByTag(doc, attr="class", value="link")
self.assertEqual(len(elements), 3)
# find elem with class "classB"
elements = Parser.getElementsByTag(doc, attr="class", value="classB")
self.assertEqual(len(elements), 1)
# find elem with class "classB"
elements = Parser.getElementsByTag(doc, attr="class", value="classc")
self.assertEqual(len(elements), 1)
# find elem with class "link" with tag strong
elements = Parser.getElementsByTag(doc, tag="strong", attr="class", value="link")
self.assertEqual(len(elements), 2)
# find elem with class "link" with tag strong
# within the second p
elem = Parser.getElementsByTag(doc, tag='p')[1]
elements = Parser.getElementsByTag(elem, tag="strong", attr="class", value="link")
self.assertEqual(len(elements), 1)
class TestArticle(unittest.TestCase):
def test_instance(self):
a = Article()
self.assertIsInstance(a, Article)
class TestExtractions(unittest.TestCase):
def setUp(self):
self.articleReport = ["=======================::. ARTICLE REPORT .::======================\n"]
def getHtml(self, filename):
return FileHelper.loadResourceFile(filename)
def getArticle(self, url, rawHTML, language=None):
config = Configuration()
if language:
config.targetLanguage = language
config.useMetaLanguge = False
config.enableImageFetching = False
g = Goose(config=config)
article = g.extractContent(url=url, rawHTML=rawHTML)
return article
def runArticleAssertions(self, article=None, expectedTitle=None,
expectedStart=None, expectedImage=None,
expectedDescription=None, expectedKeywords=None):
self.articleReport.append("URL: ")
self.articleReport.append(article.finalUrl)
self.articleReport.append('\n')
self.articleReport.append("TITLE: ")
self.articleReport.append(article.title)
self.articleReport.append('\n')
# self.articleReport.append("IMAGE: ")
# self.articleReport.append(article.topImage)
# self.articleReport.append('\n')
# self.articleReport.append("IMGKIND: ")
# self.articleReport.append(article.topImage)
# self.articleReport.append('\n')
self.articleReport.append("CONTENT: ")
self.articleReport.append(article.cleanedArticleText.replace("\n", " "))
self.articleReport.append('\n')
self.articleReport.append("METAKW: ")
self.articleReport.append(article.metaKeywords)
self.articleReport.append('\n')
self.articleReport.append("METADESC: ")
self.articleReport.append(article.metaDescription)
self.articleReport.append('\n')
self.articleReport.append("DOMAIN: ")
self.articleReport.append(article.domain)
self.articleReport.append('\n')
self.articleReport.append("LINKHASH: ")
self.articleReport.append(article.linkhash)
self.articleReport.append('\n')
# self.articleReport.append("MOVIES: ")
# self.articleReport.append(article.movies)
# self.articleReport.append('\n')
# self.articleReport.append("TAGS: ")
# self.articleReport.append(article.tags)
# self.articleReport.append('\n')
self.assertIsNotNone(article, msg=u"Resulting article was NULL!")
if expectedTitle:
title = article.title
self.assertIsNotNone(title, msg=u"Title was NULL!")
self.assertEqual(title, expectedTitle)
if expectedStart:
articleText = article.cleanedArticleText
self.assertIsNotNone(articleText,
msg=u"Resulting article text was NULL!")
self.assertTrue(len(expectedStart) <= len(articleText),
msg=u"Article text was not as long as expected beginning!")
actual = articleText[0:len(expectedStart)]
try:
msg = u"The beginning of the article text was not as expected!\nEXPECTED:%s\nGOT:%s" \
% (expectedStart, actual)
except UnicodeDecodeError:
msg = u"The beginning of the article text was not as expected!"
self.assertEqual(expectedStart, actual, msg=msg)
if expectedImage:
pass
if expectedDescription:
description = article.metaDescription
self.assertIsNotNone(description,
msg="Meta Description was NULL!")
msg = u"Meta Description was not as expected!\nEXPECTED:%s\nGOT:%s" \
% (expectedDescription, description)
self.assertEqual(expectedDescription, description, msg=msg)
if expectedKeywords:
pass
def printReport(self):
pprint.pprint(self.articleReport)
def test_cnn1(self):
html = self.getHtml('statichtml/cnn1.txt')
url = "http://www.cnn.com/2010/POLITICS/08/13/democrats.social.security/index.html"
title = "Democrats to use Social Security against GOP this fall"
content = "Washington (CNN) -- Democrats pledged "
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedTitle=title, expectedStart=content)
self.printReport()
def test_businessWeek1(self):
html = self.getHtml("statichtml/businessweek1.txt")
url = "http://www.businessweek.com/magazine/content/10_34/b4192066630779.htm"
title = "Olivia Munn: Queen of the Uncool"
content = "Six years ago, Olivia Munn arrived in Hollywood with fading ambitions of making it as a sports reporter and set about deploying"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedTitle=title, expectedStart=content)
self.printReport()
def test_businessWeek2(self):
html = self.getHtml("statichtml/businessweek2.txt")
url = "http://www.businessweek.com/management/five-social-media-lessons-for-business-09202011.html"
title = "Five Social Media Lessons for Business"
content = "At Home Depot, we first realized we needed to have a real conversation with"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedTitle=title, expectedStart=content)
self.printReport()
def test_businessWeek3(self):
html = self.getHtml("statichtml/businessweek3.txt")
url = "http://www.businessweek.com/technology/here-comes-apples-real-tv-09132011.html"
content = "Get ready, America, because by Christmas 2012 you will have an Apple TV in your living room"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_cbslocal(self):
html = self.getHtml("statichtml/cbslocal1.txt")
url = "http://newyork.cbslocal.com/2012/06/08/bc-morning-show-american-hero-kelly-malloy/"
content = "Boomer & Craig were thrilled to welcome an American Hero into the Allstate Studio, as Kelly"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_elmondo1(self):
html = self.getHtml("statichtml/elmondo1.txt")
url = "http://www.elmundo.es/elmundo/2012/10/28/espana/1351388909.html"
content = "Importante golpe a la banda terrorista ETA en Francia."
article = self.getArticle(url, html, language='es')
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_elpais(self):
html = self.getHtml("statichtml/elpais.txt")
url = "http://www.sociedad.elpais.com/sociedad/2012/10/27/actualidad/1351332873_157836.html"
content = "Los recortes pasan factura a los pacientes."
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_liberation(self):
html = self.getHtml("statichtml/liberation.txt")
url = "http://www.liberation.fr/politiques/2012/10/27/ayrault-assume-et-revendique-sa-methode_856451"
content = "A Toulouse, Jean-Marc Ayrault aura fait deux rappels sur"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
########################################
# makes lxml crash
# python: double free or corruption
def test_techcrunch1(self):
html = self.getHtml("statichtml/techcrunch1.txt")
url = "http://techcrunch.com/2011/08/13/2005-zuckerberg-didnt-want-to-take-over-the-world/"
content = "The Huffington Post has come across this fascinating five-minute interview"
title = u"2005 Zuckerberg Didn’t Want To Take Over The World"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedTitle=title, expectedStart=content)
self.printReport()
def test_foxNews(self):
html = self.getHtml("statichtml/foxnews1.txt")
url = "http://www.foxnews.com/politics/2010/08/14/russias-nuclear-help-iran-stirs-questions-improved-relations/"
content = "Russia's announcement that it will help Iran get nuclear fuel is raising questions"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_aolNews(self):
html = self.getHtml("statichtml/aol1.txt")
url = "http://www.aolnews.com/nation/article/the-few-the-proud-the-marines-getting-a-makeover/19592478"
content = "WASHINGTON (Aug. 13) -- Declaring \"the maritime soul of the Marine Corps\" is"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_huffingtonPost2(self):
html = self.getHtml("statichtml/huffpo2.txt")
url = "http://www.huffingtonpost.com/2011/10/06/alabama-workers-immigration-law_n_997793.html"
content = "MONTGOMERY, Ala. -- Alabama's strict new immigration law may be backfiring."
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_testHuffingtonPost(self):
html = self.getHtml("statichtml/huffpo1.txt")
url = "http://www.huffingtonpost.com/2010/08/13/federal-reserve-pursuing_n_681540.html"
title = "Federal Reserve's Low Rate Policy Is A 'Dangerous Gamble,' Says Top Central Bank Official"
content = "A top regional Federal Reserve official sharply criticized Friday"
keywords = "federal, reserve's, low, rate, policy, is, a, 'dangerous, gamble,', says, top, central, bank, official, business"
description = "A top regional Federal Reserve official sharply criticized Friday the Fed's ongoing policy of keeping interest rates near zero -- and at record lows -- as a \"dangerous gamble.\""
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedTitle=title, expectedStart=content, expectedDescription=description)
self.printReport()
def test_espn(self):
html = self.getHtml("statichtml/espn1.txt")
url = "http://sports.espn.go.com/espn/commentary/news/story?id=5461430"
content = "If you believe what college football coaches have said about sports"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_engadget(self):
html = self.getHtml("statichtml/engadget1.txt")
url = "http://www.engadget.com/2010/08/18/verizon-fios-set-top-boxes-getting-a-new-hd-guide-external-stor/"
content = "Streaming and downloading TV content to mobiles is nice"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_msn1(self):
html = self.getHtml("statichtml/msn1.txt")
url = "http://lifestyle.msn.com/your-life/your-money-today/article.aspx?cp-documentid=31244150"
expected = self.getHtml("statichtml/msn1_result.txt")
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=expected)
self.printReport()
# #########################################
# # FAIL CHECK
# # UNICODE
# def test_guardian1(self):
# html = self.getHtml("statichtml/guardian1.txt")
# url = "http://www.guardian.co.uk/film/2011/nov/18/kristen-wiig-bridesmaids"
# expected = self.getHtml("statichtml/guardian1_result.txt")
# article = self.getArticle(url, html)
# self.runArticleAssertions(article=article, expectedStart=expected)
# self.printReport()
def test_time(self):
html = self.getHtml("statichtml/time1.txt")
url = "http://www.time.com/time/health/article/0,8599,2011497,00.html"
title = "Invisible Oil from BP Spill May Threaten Gulf Aquatic Life"
content = "This month, the federal government released"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedTitle=title, expectedStart=content)
self.printReport()
def test_time2(self):
html = self.getHtml("statichtml/time2.txt")
url = "http://newsfeed.time.com/2011/08/24/washington-monument-closes-to-repair-earthquake-induced-crack/"
content = "Despite what the jeers of jaded Californians might suggest"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_cnet(self):
html = self.getHtml("statichtml/cnet1.txt")
url = "http://news.cnet.com/8301-30686_3-20014053-266.html?tag=topStories1"
content = "NEW YORK--Verizon Communications is prepping a new"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_yahoo(self):
html = self.getHtml("statichtml/yahoo1.txt")
url = "http://news.yahoo.com/apple-says-steve-jobs-resigning-ceo-224628633.html"
content = u"SAN FRANCISCO (AP) — Steve Jobs, the mind behind the iPhone"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_politico(self):
html = self.getHtml("statichtml/politico1.txt")
url = "http://www.politico.com/news/stories/1010/43352.html"
content = "If the newest Census Bureau estimates stay close to form"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_businessinsider1(self):
html = self.getHtml("statichtml/businessinsider1.txt")
url = "http://articles.businessinsider.com/2011-09-21/markets/30183619_1_parliament-vote-greece-civil-servants"
content = "As everyone in the world was transfixed on the Fed"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_businessinsider2(self):
html = self.getHtml("statichtml/businessinsider2.txt")
url = "http://www.businessinsider.com/goldman-on-the-fed-announcement-2011-9"
content = "From Goldman on the FOMC operation twist announcement"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
#########################################
# FAIL
# TEXT APPEND
def test_cnbc1(self):
html = self.getHtml("statichtml/cnbc1.txt")
url = "http://www.cnbc.com/id/44613978"
content = "Some traders found Wednesday's Fed statement to be a bit gloomier than expected."
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=content)
self.printReport()
def test_issue24(self):
html = self.getHtml("statichtml/issue_24.txt")
url = "http://danielspicar.github.com/goose-bug.html"
expected = self.getHtml("statichtml/issue_24_result.txt")
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=expected)
self.printReport()
def test_issue25(self):
html = self.getHtml("statichtml/issue_25.txt")
url = "http://www.accountancyage.com/aa/analysis/2111729/institutes-ifrs-bang"
expected = "UK INSTITUTES have thrown their weight behind rapid adoption of international financial reporting standards in the US."
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=expected)
self.printReport()
#########################################
# FAIL
def test_issue28(self):
html = self.getHtml("statichtml/issue_28.txt")
url = "http://www.telegraph.co.uk/foodanddrink/foodanddrinknews/8808120/Worlds-hottest-chilli-contest-leaves-two-in-hospital.html"
expected = "Emergency services were called to Kismot Restaurant's curry-eating challenge,"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=expected)
self.printReport()
def test_issue32(self):
html = self.getHtml("statichtml/issue_32.txt")
url = "http://www.tulsaworld.com/site/articlepath.aspx?articleid=20111118_61_A16_Opposi344152&rss_lnk=7"
expected = "Opposition to a proposal to remove certain personal data"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=expected)
self.printReport()
def test_issue4(self):
html = self.getHtml("statichtml/issue_4.txt")
url = "http://www.slate.fr/story/64063/tapie-mougeotte-la-provence"
expected = u"Exercice: apr\xe8s avoir attentivement lu cette br\xe8ve parue dans L'Express, vous expliquerez en quoi elle r\xe9sume une certaine id\xe9e de la France.\n\n\xabBernar"
article = self.getArticle(url, html)
self.runArticleAssertions(article=article, expectedStart=expected)
self.printReport()
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,290,335,241,615,320,000 | 45.187617 | 202 | 0.656146 | false |