ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 7df845d50ec03fc34f465f31a9fc4b782969fbc6 | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic generalized Hartree-Fock with point group symmetry.
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import symm
from pyscf.lib import logger
from pyscf.scf import hf_symm
from pyscf.scf import ghf
from pyscf.scf import chkfile
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'scf_analyze_with_meta_lowdin', True)
MO_BASE = getattr(__config__, 'MO_BASE', 1)
def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
mol = mf.mol
if not mol.symmetry:
return ghf.analyze(mf, verbose, **kwargs)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
mo_coeff = mf.mo_coeff
ovlp_ao = mf.get_ovlp()
log = logger.new_logger(mf, verbose)
if log.verbose >= logger.NOTE:
nirrep = len(mol.irrep_id)
orbsym = get_orbsym(mf.mol, mo_coeff, ovlp_ao, False)
wfnsym = 0
noccs = [sum(orbsym[mo_occ>0]==ir) for ir in mol.irrep_id]
log.note('total symmetry = %s', symm.irrep_id2name(mol.groupname, wfnsym))
log.note('occupancy for each irrep: ' + (' %4s'*nirrep), *mol.irrep_name)
log.note('double occ ' + (' %4d'*nirrep), *noccs)
log.note('**** MO energy ****')
irname_full = {}
for k,ir in enumerate(mol.irrep_id):
irname_full[ir] = mol.irrep_name[k]
irorbcnt = {}
for k, j in enumerate(orbsym):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('MO #%d (%s #%d), energy= %.15g occ= %g',
k+MO_BASE, irname_full[j], irorbcnt[j], mo_energy[k],
mo_occ[k])
dm = mf.make_rdm1(mo_coeff, mo_occ)
dip = mf.dip_moment(mol, dm, verbose=log)
if with_meta_lowdin:
pop_and_chg = mf.mulliken_meta(mol, dm, s=ovlp_ao, verbose=log)
else:
pop_and_chg = mf.mulliken_pop(mol, dm, s=ovlp_ao, verbose=log)
return pop_and_chg, dip
def canonicalize(mf, mo_coeff, mo_occ, fock=None):
    '''Canonicalization diagonalizes the GHF Fock matrix within the occupied and
    virtual subspaces separately (without changing the occupancy).
'''
mol = mf.mol
if not mol.symmetry:
return ghf.canonicalize(mf, mo_coeff, mo_occ, fock)
if getattr(mo_coeff, 'orbsym', None) is not None:
return hf_symm.canonicalize(mf, mo_coeff, mo_occ, fock)
else:
raise NotImplementedError
class GHF(ghf.GHF):
__doc__ = ghf.GHF.__doc__ + '''
Attributes for symmetry allowed GHF:
irrep_nelec : dict
Specify the number of electrons for particular irrep
{'ir_name':int, ...}.
For the irreps not listed in these dicts, the program will choose the
occupancy based on the orbital energies.
'''
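    # Illustrative usage sketch (hypothetical values; valid irrep labels depend
    # on mol.groupname):
    #     mf = GHF(mol)
    #     mf.irrep_nelec = {'A1u': 1}   # pin one electron in the A1u irrep
    # The remaining electrons are assigned by orbital energy; see the __main__
    # block at the bottom of this file for a runnable example.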
def __init__(self, mol):
ghf.GHF.__init__(self, mol)
# number of electrons for each irreps
self.irrep_nelec = {}
self._keys = self._keys.union(['irrep_nelec'])
def dump_flags(self, verbose=None):
ghf.GHF.dump_flags(self, verbose)
if self.irrep_nelec:
logger.info(self, 'irrep_nelec %s', self.irrep_nelec)
return self
def build(self, mol=None):
if mol is None: mol = self.mol
if mol.symmetry:
for irname in self.irrep_nelec:
if irname not in mol.irrep_name:
logger.warn(self, 'Molecule does not have irrep %s', irname)
nelec_fix = self.irrep_nelec.values()
if any(isinstance(x, (tuple, list)) for x in nelec_fix):
msg =('Number of alpha/beta electrons cannot be assigned '
'separately in GHF. irrep_nelec = %s' % self.irrep_nelec)
raise ValueError(msg)
nelec_fix = sum(nelec_fix)
float_irname = set(mol.irrep_name) - set(self.irrep_nelec)
if nelec_fix > mol.nelectron:
msg =('More electrons defined by irrep_nelec than total num electrons. '
'mol.nelectron = %d irrep_nelec = %s' %
(mol.nelectron, self.irrep_nelec))
raise ValueError(msg)
else:
logger.info(mol, 'Freeze %d electrons in irreps %s',
nelec_fix, self.irrep_nelec.keys())
if len(float_irname) == 0 and nelec_fix != mol.nelectron:
msg =('Num electrons defined by irrep_nelec != total num electrons. '
'mol.nelectron = %d irrep_nelec = %s' %
(mol.nelectron, self.irrep_nelec))
raise ValueError(msg)
else:
logger.info(mol, ' %d free electrons in irreps %s',
mol.nelectron-nelec_fix, ' '.join(float_irname))
return ghf.GHF.build(self, mol)
def eig(self, h, s):
mol = self.mol
if not mol.symmetry:
return self._eigh(h, s)
nirrep = len(mol.symm_orb)
symm_orb = [scipy.linalg.block_diag(c, c) for c in mol.symm_orb]
s = [reduce(numpy.dot, (c.T,s,c)) for c in symm_orb]
h = [reduce(numpy.dot, (c.T,h,c)) for c in symm_orb]
cs = []
es = []
orbsym = []
for ir in range(nirrep):
e, c = self._eigh(h[ir], s[ir])
cs.append(c)
es.append(e)
orbsym.append([mol.irrep_id[ir]] * e.size)
e = numpy.hstack(es)
c = hf_symm.so2ao_mo_coeff(symm_orb, cs)
c = lib.tag_array(c, orbsym=numpy.hstack(orbsym))
return e, c
def get_grad(self, mo_coeff, mo_occ, fock=None):
g = ghf.GHF.get_grad(self, mo_coeff, mo_occ, fock)
if self.mol.symmetry:
occidx = mo_occ > 0
viridx = ~occidx
orbsym = get_orbsym(self.mol, mo_coeff)
sym_forbid = orbsym[viridx].reshape(-1,1) != orbsym[occidx]
g[sym_forbid.ravel()] = 0
return g
def get_occ(self, mo_energy=None, mo_coeff=None):
        '''We assume mo_energy is grouped by symmetry irreps (see function
        self.eig). The orbitals are sorted after SCF.
'''
if mo_energy is None: mo_energy = self.mo_energy
mol = self.mol
if not mol.symmetry:
return ghf.GHF.get_occ(self, mo_energy, mo_coeff)
orbsym = get_orbsym(mol, mo_coeff)
mo_occ = numpy.zeros_like(mo_energy)
rest_idx = numpy.ones(mo_occ.size, dtype=bool)
nelec_fix = 0
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idx = numpy.where(orbsym == ir)[0]
if irname in self.irrep_nelec:
n = self.irrep_nelec[irname]
occ_sort = numpy.argsort(mo_energy[ir_idx].round(9), kind='mergesort')
occ_idx = ir_idx[occ_sort[:n]]
mo_occ[occ_idx] = 1
nelec_fix += n
rest_idx[ir_idx] = False
nelec_float = mol.nelectron - nelec_fix
assert(nelec_float >= 0)
if nelec_float > 0:
rest_idx = numpy.where(rest_idx)[0]
occ_sort = numpy.argsort(mo_energy[rest_idx].round(9), kind='mergesort')
occ_idx = rest_idx[occ_sort[:nelec_float]]
mo_occ[occ_idx] = 1
vir_idx = (mo_occ==0)
if self.verbose >= logger.INFO and numpy.count_nonzero(vir_idx) > 0:
ehomo = max(mo_energy[~vir_idx])
elumo = min(mo_energy[ vir_idx])
noccs = []
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idx = (orbsym == ir)
noccs.append(int(mo_occ[ir_idx].sum()))
if ehomo in mo_energy[ir_idx]:
irhomo = irname
if elumo in mo_energy[ir_idx]:
irlumo = irname
logger.info(self, 'HOMO (%s) = %.15g LUMO (%s) = %.15g',
irhomo, ehomo, irlumo, elumo)
logger.debug(self, 'irrep_nelec = %s', noccs)
hf_symm._dump_mo_energy(mol, mo_energy, mo_occ, ehomo, elumo, orbsym,
verbose=self.verbose)
if mo_coeff is not None and self.verbose >= logger.DEBUG:
ss, s = self.spin_square(mo_coeff[:,mo_occ>0], self.get_ovlp())
logger.debug(self, 'multiplicity <S^2> = %.8g 2S+1 = %.8g', ss, s)
return mo_occ
def _finalize(self):
ghf.GHF._finalize(self)
        # Using mergesort because it is stable. We don't want to change the
        # ordering of the symmetry labels when two orbitals are degenerate.
o_sort = numpy.argsort(self.mo_energy[self.mo_occ> 0].round(9), kind='mergesort')
v_sort = numpy.argsort(self.mo_energy[self.mo_occ==0].round(9), kind='mergesort')
orbsym = get_orbsym(self.mol, self.mo_coeff)
self.mo_energy = numpy.hstack((self.mo_energy[self.mo_occ> 0][o_sort],
self.mo_energy[self.mo_occ==0][v_sort]))
self.mo_coeff = numpy.hstack((self.mo_coeff[:,self.mo_occ> 0].take(o_sort, axis=1),
self.mo_coeff[:,self.mo_occ==0].take(v_sort, axis=1)))
orbsym = numpy.hstack((orbsym[self.mo_occ> 0][o_sort],
orbsym[self.mo_occ==0][v_sort]))
self.mo_coeff = lib.tag_array(self.mo_coeff, orbsym=orbsym)
nocc = len(o_sort)
self.mo_occ[:nocc] = 1
self.mo_occ[nocc:] = 0
if self.chkfile:
chkfile.dump_scf(self.mol, self.chkfile, self.e_tot, self.mo_energy,
self.mo_coeff, self.mo_occ, overwrite_mol=False)
return self
def analyze(self, verbose=None, **kwargs):
if verbose is None: verbose = self.verbose
return analyze(self, verbose, **kwargs)
@lib.with_doc(hf_symm.get_irrep_nelec.__doc__)
def get_irrep_nelec(self, mol=None, mo_coeff=None, mo_occ=None, s=None):
if mol is None: mol = self.mol
if mo_occ is None: mo_occ = self.mo_occ
if mo_coeff is None: mo_coeff = self.mo_coeff
if s is None: s = self.get_ovlp()
return hf_symm.get_irrep_nelec(mol, mo_coeff, mo_occ, s)
canonicalize = canonicalize
def get_orbsym(mol, mo_coeff, s=None, check=False):
if mo_coeff is None:
orbsym = numpy.hstack([[ir] * mol.symm_orb[i].shape[1]
for i, ir in enumerate(mol.irrep_id)])
elif getattr(mo_coeff, 'orbsym', None) is not None:
orbsym = mo_coeff.orbsym
else:
nao = mo_coeff.shape[0] // 2
if isinstance(s, numpy.ndarray):
assert(s.size == nao**2 or numpy.allclose(s[:nao,:nao], s[nao:,nao:]))
s = s[:nao,:nao]
mo_a = mo_coeff[:nao].copy()
mo_b = mo_coeff[nao:]
zero_alpha_idx = numpy.linalg.norm(mo_a, axis=0) < 1e-7
mo_a[:,zero_alpha_idx] = mo_b[:,zero_alpha_idx]
orbsym = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_a, s, check)
return numpy.asarray(orbsym)
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.build(
verbose = 1,
output = None,
atom = [['O', (0.,0.,0.)],
['O', (0.,0.,1.)], ],
basis = {'O': 'ccpvdz'},
symmetry = True,
charge = -1,
spin = 1
)
method = GHF(mol)
method.verbose = 5
method.irrep_nelec['A1u'] = 1
energy = method.kernel()
print(energy - -126.117033823738)
method.canonicalize(method.mo_coeff, method.mo_occ)
method.analyze()
|
py | 7df845f7a49a8d4cb2dfff361754a3168c1e0428 | import random
n1 = input("Student 1: ")
n2 = input("Student 2: ")
n3 = input("Student 3: ")
n4 = input("Student 4: ")
lista = [n1, n2, n3, n4]
escolhido = random.choice(lista)
print("The chosen student was {}".format(escolhido))
|
py | 7df8466f4c30973a0f0ab2cd7cf3722657ffd5f9 | from pypy.interpreter.error import OperationError
from pypy.interpreter.gateway import unwrap_spec
from rpython.rlib.rstring import StringBuilder
from pypy.module.binascii.interp_binascii import raise_Error, raise_Incomplete
from rpython.rlib.rarithmetic import ovfcheck
# ____________________________________________________________
DONE = 0x7f
SKIP = 0x7e
FAIL = 0x7d
table_a2b_hqx = [
#^@ ^A ^B ^C ^D ^E ^F ^G
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#\b \t \n ^K ^L \r ^N ^O
FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
#^P ^Q ^R ^S ^T ^U ^V ^W
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
#^X ^Y ^Z ^[ ^\ ^] ^^ ^_
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
# ! " # $ % & '
FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
#( ) * + , - . /
0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
#0 1 2 3 4 5 6 7
0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
#8 9 : ; < = > ?
0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
#@ A B C D E F G
0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
#H I J K L M N O
0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
#P Q R S T U V W
0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
#X Y Z [ \ ] ^ _
0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
#` a b c d e f g
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
#h i j k l m n o
0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
#p q r s t u v w
0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
#x y z { | } ~ ^?
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
]
table_a2b_hqx = ''.join(map(chr, table_a2b_hqx))
@unwrap_spec(ascii='bufferstr')
def a2b_hqx(space, ascii):
"""Decode .hqx coding. Returns (bin, done)."""
# overestimate the resulting length
res = StringBuilder(len(ascii))
done = 0
pending_value = 0
pending_bits = 0
for c in ascii:
n = ord(table_a2b_hqx[ord(c)])
if n <= 0x3F:
pending_value = (pending_value << 6) | n
pending_bits += 6
if pending_bits == 24:
# flush
res.append(chr(pending_value >> 16))
res.append(chr((pending_value >> 8) & 0xff))
res.append(chr(pending_value & 0xff))
pending_value = 0
pending_bits = 0
elif n == FAIL:
raise_Error(space, 'Illegal character')
elif n == DONE:
if pending_bits >= 8:
res.append(chr(pending_value >> (pending_bits - 8)))
if pending_bits >= 16:
res.append(chr((pending_value >> (pending_bits - 16)) & 0xff))
done = 1
break
#elif n == SKIP: pass
else:
if pending_bits > 0:
raise_Incomplete(space, 'String has incomplete number of bytes')
return space.newtuple([space.newbytes(res.build()), space.newint(done)])
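# Rough sketch of the 6-bit packing handled above, with plain integers and the
# RPython/space machinery left out: four hqx characters a, b, c, d accumulate as
#   pending_value = (a << 18) | (b << 12) | (c << 6) | d
# and the 24-bit flush emits
#   pending_value >> 16, (pending_value >> 8) & 0xff, pending_value & 0xff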
# ____________________________________________________________
hqx_encoding = (
'!"#$%&\'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr')
@unwrap_spec(bin='bufferstr')
def b2a_hqx(space, bin):
"Encode .hqx data."
extra = (len(bin) + 2) // 3
try:
newlength = ovfcheck(len(bin) + extra)
except OverflowError:
raise OperationError(space.w_MemoryError, space.w_None)
res = StringBuilder(newlength)
leftchar = 0
leftbits = 0
for c in bin:
# Shift into our buffer, and output any 6bits ready
leftchar = (leftchar << 8) | ord(c)
leftbits += 8
res.append(hqx_encoding[(leftchar >> (leftbits-6)) & 0x3f])
leftbits -= 6
if leftbits >= 6:
res.append(hqx_encoding[(leftchar >> (leftbits-6)) & 0x3f])
leftbits -= 6
# Output a possible runt byte
if leftbits > 0:
leftchar <<= (6 - leftbits)
res.append(hqx_encoding[leftchar & 0x3f])
return space.newbytes(res.build())
# ____________________________________________________________
@unwrap_spec(hexbin='bufferstr')
def rledecode_hqx(space, hexbin):
"Decode hexbin RLE-coded string."
# that's a guesstimation of the resulting length
res = StringBuilder(len(hexbin))
end = len(hexbin)
i = 0
lastpushed = -1
while i < end:
c = hexbin[i]
i += 1
if c != '\x90':
res.append(c)
lastpushed = ord(c)
else:
if i == end:
raise_Incomplete(space, 'String ends with the RLE code \\x90')
count = ord(hexbin[i]) - 1
i += 1
if count < 0:
res.append('\x90')
lastpushed = 0x90
else:
if lastpushed < 0:
raise_Error(space, 'String starts with the RLE code \\x90')
res.append_multiple_char(chr(lastpushed), count)
return space.newbytes(res.build())
# ____________________________________________________________
@unwrap_spec(data='bufferstr')
def rlecode_hqx(space, data):
"Binhex RLE-code binary data."
# that's a guesstimation of the resulting length
res = StringBuilder(len(data))
i = 0
end = len(data)
while i < end:
c = data[i]
res.append(c)
if c == '\x90':
# Escape it, and ignore repetitions (*).
res.append('\x00')
else:
# Check how many following are the same
inend = i + 1
while inend < end and data[inend] == c and inend < i + 255:
inend += 1
if inend - i > 3:
# More than 3 in a row. Output RLE. For the case of more
# than 255, see (*) below.
res.append('\x90')
res.append(chr(inend - i))
i = inend
continue
i += 1
    # (*) Note that we put simplicity before compactness here, like CPython.
# I am sure that if we tried harder to produce the smallest possible
# string that rledecode_hqx() would expand back to 'data', there are
# some programs somewhere that would start failing obscurely in rare
# cases.
return space.newbytes(res.build())
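# Byte-level sketch of the RLE format (illustrative only; the functions here are
# interp-level and take a `space` argument):
#   rlecode_hqx('AAAAAA')  ->  'A' + '\x90' + '\x06'   (literal, marker, run length)
#   rlecode_hqx('\x90')    ->  '\x90' + '\x00'         (0x90 itself escaped with a zero count)
# rledecode_hqx() reverses this: a '\x90' followed by a count byte n > 0 appends
# n - 1 further copies of the previously emitted byte, while n == 0 restores a
# literal 0x90.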
# ____________________________________________________________
crctab_hqx = [
0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
]
@unwrap_spec(data='bufferstr', oldcrc=int)
def crc_hqx(space, data, oldcrc):
"Compute CRC-CCIT incrementally."
crc = oldcrc
for c in data:
crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
return space.newint(crc)
|
py | 7df846fe9b897364145e731f4ba1ea6d23dd2fda | from flask import Flask, request, Response
from github import Github
from flask import request
import requests
import json
import os
app = Flask(__name__)
git_token = os.environ['GITHUB_TOKEN']
repo_name = ''
default_branch = ''
default_user = os.environ['DEFAULT_USER']
issue_Template = """
Hi @{}, the security settings for your main branch have been changed!
This will keep your repository safe and maintain high standards.
We applied the following changes automatically:
**Changes**:
- Admin enforcement: branch protection rules also apply to administrators.
- Pull request protection: merging requires at least 5 approving reviews.
""".format(default_user)
@app.route('/webhook', methods=['POST'])
def respond():
git_data = request.json;
if 'action' in git_data:
if git_data['action'] == 'created':
if git_data['repository']:
repo_name = git_data['repository']['full_name'];
default_branch = git_data['repository']['default_branch']
createGitHubIssue(repo_name, 'Updated branch protection', issue_Template, default_user, 'enhancement');
createBranchProtection(git_data['repository']['owner']['login'],git_data['repository']['name'], default_branch)
return Response(status=200)
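# Minimal sketch of a webhook payload that would exercise respond() above; the
# field names mirror the keys accessed in the handler, the values are made up:
# {
#   "action": "created",
#   "repository": {"full_name": "octocat/demo", "name": "demo",
#                  "default_branch": "main",
#                  "owner": {"login": "octocat"}}
# }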
def createBranchProtection(owner,repo,branch,review_number=5):
'''This is still in preview mode'''
url = 'https://api.github.com/repos/{}/{}/branches/{}/protection'.format(owner,repo, branch)
headers = {"Authorization": "token {}".format(git_token), "Accept": "application/vnd.github.luke-cage-preview+json"}
data = {
"required_status_checks": {
"strict": True,
"contexts": [
"trevis-baby"
]
},
"enforce_admins": True,
"required_pull_request_reviews": {
"dismissal_restrictions": {
"users": [
"users"
],
"teams": [
"teams"
]
},
"dismiss_stale_reviews": True,
"require_code_owner_reviews": True,
"required_approving_review_count": review_number
},
"restrictions": {
"users": [
'users'
],
"teams": [
'teams'
],
"apps": [
'apps'
]
}
}
r = requests.put(url, headers=headers, data=json.dumps(data))
return
def createGitHubIssue(repoName, title, body=None, assignee=None, labels=None):
'''Create an issue on github.com using the given parameters.'''
client = Github(git_token)
repo = client.get_repo("%s" % (repoName))
issue = repo.create_issue(
title= title,
body= body,
assignee= assignee,
labels=[
repo.get_label(labels)
]
)
|
py | 7df84708d20783bdafd0f37f73bb3e7f5f4a7f34 | # -*- coding: utf-8 -*-
import dataset
from . import config
from . import helpers
# Module API
class Warehouse(object):
# Public
def open_spider(self, spider):
print("------ open_spider", spider.conf, spider.conn)
if spider.conf and spider.conn:
self.__conf = spider.conf
self.__conn = spider.conn
else:
            # For runs triggered by the scrapy CLI utility
self.__conf = helpers.get_variables(config, str.isupper)
self.__conn = {'warehouse': dataset.connect(config.WAREHOUSE_URL)}
def process_item(self, record, spider):
record.write(self.__conf, self.__conn)
return record
|
py | 7df8470b1791af919abd258f22587dc3097ab655 | # base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova
# Summerfruit Dark scheme by Christopher Corley (http://christop.club/)
base00 = "#151515"
base01 = "#202020"
base02 = "#303030"
base03 = "#505050"
base04 = "#B0B0B0"
base05 = "#D0D0D0"
base06 = "#E0E0E0"
base07 = "#FFFFFF"
base08 = "#FF0086"
base09 = "#FD8900"
base0A = "#ABA800"
base0B = "#00C918"
base0C = "#1FAAAA"
base0D = "#3777E6"
base0E = "#AD00A1"
base0F = "#CC6633"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base03
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0A
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base01
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base0A
# Top border color of the completion widget category headers.
c.colors.completion.item.selected.border.top = base0A
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base0A
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base08
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base0B
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base0A
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base0B
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base00
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base0D
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base00
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base0C
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base00
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base03
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base05
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base00
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base05
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base00
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base00
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base0E
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base00
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base0D
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base05
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0C
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base03
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0C
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base07
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base07
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base05
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base00
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base05
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base0E
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base00
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base05
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base00
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base05
# Background color for webpages if unset (or empty to use the theme's
# color).
# c.colors.webpage.bg = base00
|
py | 7df8473029c7df2e472471f7466deaa50cf48a40 | #!/usr/bin/env python3
# BlueKitchen GmbH (c) 2014
# convert log output to PacketLogger format
# can be viewed with Wireshark
# APPLE PacketLogger
# typedef struct {
# uint32_t len;
# uint32_t ts_sec;
# uint32_t ts_usec;
# uint8_t type; // 0xfc for note
# }
import re
import sys
import time
import os
default_date="2001-01-01"
default_hours = 12
packet_counter = 0
last_time = default_date + " " + str(default_hours) + ":00:00.000"
def chop(line, prefix):
if line.startswith(prefix):
return line[len(prefix):]
return None
def str2hex(value):
if value:
return int(value, 16)
return None
def arrayForNet32(value):
return bytearray([value >> 24, (value >> 16) & 0xff, (value >> 8) & 0xff, value & 0xff])
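# For reference, this is equivalent to struct.pack(">I", value): a big-endian
# 32-bit field, matching the PacketLogger header layout sketched at the top.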
def generateTimestamp(t):
global last_time
global packet_counter
# use last_time if time missing for this entry
if not t:
t = last_time
if t:
last_time = t
# check for date
parts = t.split(' ')
have_date = True
if len(parts) == 1:
# only time, prepend fixed date
have_date = False
t = "2000-01-01 " + t;
# handle ms
try:
(t1, t2) = t.split('.')
if t1 and t2:
t_obj = time.strptime(t1, "%Y-%m-%d %H:%M:%S")
tv_sec = int(time.mktime(t_obj))
if not have_date:
# start at 12:00
tv_sec += 12*60*60
tv_usec = int(t2) * 1000
return (tv_sec, tv_usec)
except ValueError:
# print 'Cannot parse time', t
pass
packet_counter += 1
return (packet_counter, 0)
def dumpPacket(fout, timestamp, type, data):
length = 9 + len(data)
(tv_sec, tv_usec) = generateTimestamp(timestamp)
fout.write(arrayForNet32(length))
fout.write(arrayForNet32(tv_sec))
fout.write(arrayForNet32(tv_usec))
fout.write(bytearray([type]))
fout.write(data)
def handleHexPacket(fout, timestamp, type, text):
try:
data = bytearray(list(map(str2hex, text.strip().split())))
dumpPacket(fout, timestamp, type, data)
except TypeError:
print('Cannot parse hexdump', text.strip())
if len(sys.argv) == 1:
print('BTstack Console to PacketLogger converter')
print('Copyright 2014, BlueKitchen GmbH')
print('')
print('Usage: ', sys.argv[0], 'ascii-log-file.txt [hci_dump.pklg]')
print('Converted hci_dump.pklg can be viewed with Wireshark and OS X PacketLogger')
exit(0)
infile = sys.argv[1]
outfile = os.path.splitext(infile)[0] + ".pklg"
if len(sys.argv) > 2:
outfile = sys.argv[2]
# with open(outfile, 'w') as fout:
with open (outfile, 'wb') as fout:
with open (infile, 'rt') as fin:
packet_counter = 0
line_conter = 0
for line in fin:
try:
# try to deal with windows 16-bit unicode by dropping \0 characters
line = ''.join([c for c in line if c != '\0'])
line_conter += 1
timestamp = None
# strip newlines
line = line.strip("\n\r")
# skip empty lines
if len(line) == 0:
continue
                parts = re.match(r'\[(.*)\] (.*)', line)
if parts and len(parts.groups()) == 2:
(timestamp, line) = parts.groups()
rest = chop(line,'CMD => ')
if rest:
handleHexPacket(fout, timestamp, 0, rest)
continue
rest = chop(line,'EVT <= ')
if rest:
handleHexPacket(fout, timestamp, 1, rest)
continue
rest = chop(line,'ACL => ')
if rest:
handleHexPacket(fout, timestamp, 2, rest)
continue
rest = chop(line,'ACL <= ')
if rest:
handleHexPacket(fout, timestamp, 3, rest)
continue
rest = chop(line,'SCO => ')
if rest:
handleHexPacket(fout, timestamp, 8, rest)
continue
rest = chop(line,'SCO <= ')
if rest:
handleHexPacket(fout, timestamp, 9, rest)
continue
rest = chop(line,'LOG -- ')
if rest:
line = rest
dumpPacket(fout, timestamp, 0xfc, line.encode('ascii'))
except:
print("Error in line %u: '%s'" % (line_conter, line))
print("\nPacket Log: %s" % outfile)
|
py | 7df84772a92fd108f711b4b2b0ee81c79029cccd | """
Usage:
# From tensorflow/models/
# Create train data:
python generate_tfrecord.py --csv_input=data/train_labels.csv --output_path=train.record
# Create test data:
python generate_tfrecord.py --csv_input=data/test_labels.csv --output_path=test.record
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import io
import pandas as pd
import tensorflow as tf
from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict
flags = tf.app.flags
flags.DEFINE_string('csv_input', '', 'Path to the CSV input')
flags.DEFINE_string('output_path', '', 'Path to output TFRecord')
flags.DEFINE_string('image_dir', '', 'Path to images')
FLAGS = flags.FLAGS
# TO-DO replace this with label map
def class_text_to_int(row_label):
if row_label == 'mask':
return 1
elif row_label == 'nomask':
return 2
else:
        return None
def split(df, group):
data = namedtuple('data', ['filename', 'object'])
gb = df.groupby(group)
return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]
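# Note: split() groups the label CSV by filename, yielding one namedtuple per
# image whose .object dataframe holds that image's rows; create_tf_example below
# reads class, xmin, ymin, xmax and ymax from those rows.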
def create_tf_example(group, path):
with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
encoded_jpg = fid.read()
encoded_jpg_io = io.BytesIO(encoded_jpg)
image = Image.open(encoded_jpg_io)
width, height = image.size
filename = group.filename.encode('utf8')
image_format = b'jpg'
xmins = []
xmaxs = []
ymins = []
ymaxs = []
classes_text = []
classes = []
for index, row in group.object.iterrows():
xmins.append(row['xmin'] / width)
xmaxs.append(row['xmax'] / width)
ymins.append(row['ymin'] / height)
ymaxs.append(row['ymax'] / height)
classes_text.append(row['class'].encode('utf8'))
classes.append(class_text_to_int(row['class']))
tf_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(filename),
'image/source_id': dataset_util.bytes_feature(filename),
'image/encoded': dataset_util.bytes_feature(encoded_jpg),
'image/format': dataset_util.bytes_feature(image_format),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
'image/object/class/label': dataset_util.int64_list_feature(classes),
}))
return tf_example
def main(_):
writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
path = os.path.join(FLAGS.image_dir)
examples = pd.read_csv(FLAGS.csv_input)
grouped = split(examples, 'filename')
for group in grouped:
tf_example = create_tf_example(group, path)
writer.write(tf_example.SerializeToString())
writer.close()
output_path = os.path.join(os.getcwd(), FLAGS.output_path)
print('Successfully created the TFRecords: {}'.format(output_path))
if __name__ == '__main__':
tf.app.run()
|
py | 7df84798b56cbe686d67ab305ad3ac8224024778 | # -*- coding: utf-8 -*-
import json
import logging
import socket
from flask import current_app
from polylogyx.utils import DateTimeEncoder, append_node_and_rule_information_to_alert, flatten_json
from .base import AbstractAlerterPlugin
DEFAULT_KEY_FORMAT = 'polylogyx-incident-{count}'
class RsyslogAlerter(AbstractAlerterPlugin):
def __init__(self, config):
# Required configuration
self.service_key = config['service_key']
# Optional
self.client_url = config.get('client_url', '')
self.key_format = config.get('key_format', DEFAULT_KEY_FORMAT)
# Other
self.incident_count = 0
self.logger = logging.getLogger(__name__ + '.RsyslogAlerter')
    def handle_alert(self, node, match, intel_match):
self.incident_count += 1
key = self.key_format.format(
count=self.incident_count
)
import datetime as dt
if match:
current_app.logger.log(logging.WARNING, 'Triggered alert: {0!r}'.format(match))
description = match.rule.template.safe_substitute(
match.result['columns'],
**node
).rstrip()
description = ":".join(description.split('\r\n\r\n', 1))
payload = json.dumps(append_node_and_rule_information_to_alert(node, flatten_json({
'event_type': 'trigger',
'service_key': self.service_key,
'incident_key': key,
'description': description,
'host_identifier': node['host_identifier'],
'client': 'PolyLogyx',
"client_url": self.client_url,
"query_name": match.result['name'],
'rule_name': match.rule.name,
'rule_description': match.rule.description,
'rule_status': match.rule.status,
'severity':match.rule.severity,
'alert_type': 'Rule',
'created_at': dt.datetime.utcnow(),
'action': match.result['action'],
'columns': match.result['columns'],
})), cls=DateTimeEncoder)
elif intel_match:
current_app.logger.log(logging.WARNING, 'Triggered alert: {0!r}'.format(intel_match))
payload = json.dumps(append_node_and_rule_information_to_alert(node, flatten_json({
'event_type': 'trigger',
'service_key': self.service_key,
'incident_key': key,
'host_identifier': node['host_identifier'],
'client': 'PolyLogyx',
"client_url": self.client_url,
'alert_type':'Threat Intel',
"query_name": intel_match.intel['query_name'],
'source_data': intel_match.data,
'source': intel_match.intel['source'],
'severity': intel_match.intel['severity'],
'created_at': dt.datetime.utcnow(),
'columns': intel_match.result,
})), cls=DateTimeEncoder)
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("rsyslogf", 514))
bSock = True
current_app.logger.info("[alert] Socket connected")
except:
bSock = False
current_app.logger.error("Unable to socket connect, is rsyslog forwarder running? If not, disable rsyslog forwading in docker compose file.")
try:
if bSock:
sock.send(payload.encode('utf-8'))
sock.send('\n'.encode('utf-8'))
finally:
if bSock:
sock.close()
current_app.logger.info("[alert] Socket closed")
|
py | 7df848b65ead88baf7ba5fed2c1023e81608fe5b | """ file: test_transforms.py
author: Jess Robertson
CSIRO Mineral Resources
date: January 2017
description: Unit tests for our transform implementations
"""
import unittest
import numpy as np
from scipy import stats
np.random.seed(42) # don't forget your towel
from earthchem.transform.isometric import IsometricLogTransform
from earthchem.transform.centered import CenteredLogTransform
from earthchem.transform.barycentric import BarycentricTransform
from earthchem.transform.additive import AdditiveLogTransform
from earthchem.transform.utilities import basis_matrix, closure
def ortho_group(dim, size=1):
"""
Draw random samples from O(N).
Stolen from scipy v. 18 code cause we don't have it here yet
Parameters:
dim - the dimension of rotation space (N).
size - the number of samples to draw (default 1).
Returns:
Random size N-dimensional matrices, dimension (size, dim, dim)
"""
size = int(size)
if size > 1:
return np.array([ortho_group(dim) for i in range(size)])
H = np.eye(dim)
for n in range(1, dim):
x = stats.norm.rvs(size=(dim-n+1,))
# random sign, 50/50, but chosen carefully to avoid roundoff error
D = np.sign(x[0])
x[0] += D * np.sqrt((x * x).sum())
# Householder transformation
Hx = -D * (np.eye(dim - n + 1)
- 2 * np.outer(x, x) / (x * x).sum())
mat = np.eye(dim)
mat[n-1:, n-1:] = Hx
H = np.dot(H, mat)
return H
def correlation_matrix(ndim, distribution=None):
"""
Generate a random correlation matrix
Parameters:
ndim - the number of dimensions in the matrix
distribution - optional, a scipy.stats distribution to
generate eigenvalues from (random eigenvalues must
be positive). Defaults to scipy.stats.gamma(1)
Returns:
a random correlation matrix
"""
# Specify the eigenvalue distribution
dist = distribution or stats.gamma(1)
# Generate random rotation
D = np.diag(dist.rvs(ndim))
Q = ortho_group(ndim)
return np.dot(np.dot(Q, D), Q.T)
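# Usage note (illustrative): the tests below pass the result straight to
# scipy.stats.multivariate_normal as a covariance, e.g.
#   cov = correlation_matrix(3, stats.gamma(2))
#   mvn = stats.multivariate_normal(mean=np.zeros(3), cov=cov)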
class TestTransforms(unittest.TestCase):
"Unit tests for transforms"
def test_centered_logratio_isometry(self):
"Centered logratio should be isomorphic under closure"
t = CenteredLogTransform()
# Loop over different dimensions
npts = 1000
for ndim in (3, 5, 10, 20):
for _ in range(10):
# Generate random variables
mvn = stats.multivariate_normal(
mean=stats.uniform(0, 1).rvs(ndim),
cov=correlation_matrix(ndim, stats.gamma(2))
)
X = t.inverse_transform(mvn.rvs(npts))
L = t.transform(X)
# Check that things are the right size
self.assertEqual(X.shape, (npts, ndim))
self.assertEqual(L.shape, (npts, ndim))
                # Check that inverse and transform are isomorphic
# Need to use pre-transformed variables since these are
# only isomorphic to closure
self.assertTrue(np.allclose(X, t.inverse_transform(L)))
self.assertTrue(np.allclose(L, t.transform(X)))
def test_barycentric_isometry(self):
"Centered logratio should be isomorphic under closure"
t = BarycentricTransform()
# Loop over different dimensions
npts = 1000
for _ in range(10):
# Generate random variables
mvn = stats.multivariate_normal(
stats.uniform(0, 1).rvs(3),
correlation_matrix(3, stats.gamma(2)))
X = mvn.rvs(npts)
L = t.transform(X)
# Check that things are the right size
self.assertEqual(X.shape, (npts, 3))
self.assertEqual(L.shape, (npts, 2))
def test_barycentric_error(self):
"Passing data with ndims != 3 should raise a ValueError"
t = BarycentricTransform()
# Loop over different dimensions
for ndim in (2, 4, 5, 10, 20):
# Generate random variables
mvn = stats.multivariate_normal(
stats.uniform(0, 1).rvs(ndim),
correlation_matrix(ndim, stats.gamma(2)))
self.assertRaises(ValueError, t.transform, mvn.rvs(10))
def test_isometric_basis_matrix(self):
"ILR basis matrix should be constructed correctly"
for ndim in range(2, 20):
psi = basis_matrix(ndim)
# Check that Q @ Q.T = identity
self.assertTrue(
np.allclose(np.dot(psi, psi.T),
np.identity(ndim - 1)))
# Check that Q.T @ Q = identity - 1 / D * ones
expected = np.identity(ndim) - np.ones((ndim, ndim)) / ndim
self.assertTrue(np.allclose(np.dot(psi.T, psi),
expected))
def test_closure(self):
"Closure operator should work ok"
for ndim in range(2, 20):
for npts in (10, 100, 1000):
mvn = stats.multivariate_normal(stats.uniform(0, 1).rvs(3),
correlation_matrix(3, stats.gamma(2)))
X = closure(mvn.rvs(npts))
self.assertTrue(np.allclose(X.sum(axis=1), np.ones(npts)))
def test_isometric_logratio_isometry(self):
"Isometric logratio should be isomorphic under closure"
t = IsometricLogTransform()
# Loop over different dimensions
npts = 1000
for ndim in (3, 5, 10, 20):
for _ in range(10):
# Generate random variables
mvn = stats.multivariate_normal(
stats.uniform(0, 1).rvs(ndim - 1),
correlation_matrix(ndim - 1))
X = t.inverse_transform(mvn.rvs(npts))
L = t.transform(X)
# Check that things are the right size
self.assertEqual(X.shape, (npts, ndim))
self.assertEqual(L.shape, (npts, ndim - 1))
                # Check that inverse and transform are isomorphic
# Need to use pre-transformed variables since these are
# only isomorphic to closure
self.assertTrue(np.allclose(X, t.inverse_transform(L)))
self.assertTrue(np.allclose(L, t.transform(X)))
def test_additive_logratio_isometry(self):
"Additive logratio should be isomorphic under closure"
t = AdditiveLogTransform()
# Loop over different dimensions
npts = 1000
for ndim in (3, 5, 10, 20):
for _ in range(10):
# Generate random variables
mvn = stats.multivariate_normal(
stats.uniform(0, 1).rvs(ndim - 1),
correlation_matrix(ndim - 1))
X = t.inverse_transform(mvn.rvs(npts))
L = t.transform(X)
# Check that things are the right size
self.assertEqual(X.shape, (npts, ndim))
self.assertEqual(L.shape, (npts, ndim - 1))
                # Check that inverse and transform are isomorphic
# Need to use pre-transformed variables since these are
# only isomorphic to closure
self.assertTrue(np.allclose(X, t.inverse_transform(L)))
self.assertTrue(np.allclose(L, t.transform(X)))
def test_additive_logratio_isometry_with_specd_scale(self):
"Additive logratio should be isomorphic under closure"
# Loop over different dimensions
npts = 1000
for ndim in (3, 5, 10, 20):
for _ in range(10):
t = AdditiveLogTransform(np.random.randint(0, ndim))
# Generate random variables
mvn = stats.multivariate_normal(
stats.uniform(0, 1).rvs(ndim - 1),
correlation_matrix(ndim - 1))
X = t.inverse_transform(mvn.rvs(npts))
L = t.transform(X)
# Check that things are the right size
self.assertEqual(X.shape, (npts, ndim))
self.assertEqual(L.shape, (npts, ndim - 1))
                # Check that inverse and transform are isomorphic
# Need to use pre-transformed variables since these are
# only isomorphic to closure
self.assertTrue(np.allclose(X, t.inverse_transform(L)))
self.assertTrue(np.allclose(L, t.transform(X)))
def test_additive_index_error(self):
"Additive logratio should raise value error if base index > len"
ndim = 4
mvn = stats.multivariate_normal(
stats.uniform(0, 1).rvs(ndim - 1),
correlation_matrix(ndim - 1))
alr = AdditiveLogTransform(ndim + 2)
clr = CenteredLogTransform()
X = clr.inverse_transform(mvn.rvs(1000))
self.assertRaises(ValueError, alr.transform, X)
if __name__ == '__main__':
unittest.main()
|
py | 7df84996f8e3ecc384fda9a74cfc9c2cfba21a15 | from datetime import datetime
from enum import Enum
from pydantic import BaseModel, Extra, Field
from typing import Any, Dict, List, Optional
# https://www.mongodb.com/blog/post/building-with-patterns-the-schema-versioning-pattern
SCHEMA_VERSION = "1.2"
DEFAULT_UID = "342e4568-e23b-12d3-a456-526714178000"
class Persistable(BaseModel):
uid: Optional[str]
class ModelInfo(BaseModel):
label_index: Optional[Dict[str, float]]
class Locator(BaseModel):
spec: str = Field(description="Description of the specification for this locator")
path: Any = Field(description="Locator information defined by the spec field")
class TagSource(Persistable):
schema_version: str = SCHEMA_VERSION
model_info: Optional[ModelInfo]
type: str
name: Optional[str] = Field(description="optional name of model that produces tags")
class Config:
extra = Extra.forbid
class TaggingEvent(Persistable):
schema_version: str = SCHEMA_VERSION
tagger_id: str
run_time: datetime
accuracy: Optional[float] = Field(ge=0.0, le=1.0)
class Config:
extra = Extra.forbid
class Tag(BaseModel):
uid: str = DEFAULT_UID
name: str = Field(description="name of the tag")
locator: Optional[Locator] = Field(description="optional location information, "
"for indicating a subset of a dataset that this tag applies to")
confidence: Optional[float] = Field(description="confidence provided for this tag")
event_id: Optional[str] = Field(description="id of event where this tag was created")
class DatasetType(str, Enum):
tiled = "tiled"
file = "file"
web = "web"
class DatasetCollection(Persistable):
assets: List[str]
models: Dict[str, int] # model and the quality of that model when run against a model
class Dataset(BaseModel):
uid: str = DEFAULT_UID
schema_version: str = SCHEMA_VERSION
type: DatasetType
uri: str
tags: Optional[List[Tag]]
class Config:
extra = Extra.forbid
class FileDataset(Dataset):
type = DatasetType.file
class TagPatchRequest(BaseModel):
add_tags: Optional[List[Tag]]
remove_tags: Optional[List[str]]
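# Illustrative construction (field values are made up):
#   ds = Dataset(type=DatasetType.file, uri="/data/scan_0001.h5",
#                tags=[Tag(name="peaks", confidence=0.9)])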
|
py | 7df84a1875ec8be24f5694b20c673a73b129bd1c | from .lib_template import *
class Libxml2Seeker(Seeker):
"""Seeker (Identifier) for the libxml(2) open source library."""
# Library Name
NAME = "libxml2"
# Overridden base function
def searchLib(self, logger):
"""Check if the open source library is located somewhere in the binary.
Args:
logger (logger): elementals logger instance
Return Value:
number of library instances that were found in the binary
"""
extra_parts = ["CVS", "SVN", "GIT"]
key_string = ": program compiled against libxml %d using older %d\n"
# Now search
key_indices = []
for idx, bin_str in enumerate(self._all_strings):
# we have a match
if key_string in str(bin_str):
logger.debug(f"Located a key string of {self.NAME} in address 0x{bin_str.ea:x}")
key_indices.append(idx)
break
# Now check for the version string
self._version_strings = []
for key_index in key_indices:
for bin_str in self._all_strings[max(key_index - 10000, 0):min(key_index + 10000, len(self._all_strings))]:
cur_str = str(bin_str)
if cur_str.find("-") != -1 and cur_str.split("-")[1][:3] in extra_parts:
logger.debug(f"Located a version string of {self.NAME} in address 0x{bin_str.ea:x}")
self._version_strings.append(cur_str)
break
# return the result
return len(self._version_strings)
# Overridden base function
def identifyVersions(self, logger):
"""Identify the version(s) of the library (assuming it was already found).
Assumptions:
1. searchLib() was called before calling identifyVersions()
2. The call to searchLib() returned a number > 0
Args:
logger (logger): elementals logger instance
Return Value:
list of Textual ID(s) of the library's version(s)
"""
results = []
# extract the version from the copyright string
for work_str in self._version_strings:
results.append(self.extractVersion(work_str))
# return the result
return results
# Register our class
Libxml2Seeker.register(Libxml2Seeker.NAME, Libxml2Seeker)
|
py | 7df84a375de8ecadbad05a3a5dcc57eaf6c660d9 | # coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from core import jobs
from core.domain import feedback_domain
from core.platform import models
import feconf
from google.appengine.ext import ndb
(base_models, feedback_models, exp_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.feedback, models.NAMES.exploration
])
transaction_services = models.Registry.import_transaction_services()
class FeedbackAnalyticsRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
"""A continuous-computation job that sets the number of open threads
and the total number of threads to the default integer value of zero
in the realtime layer.
"""
num_open_threads = ndb.IntegerProperty(default=0)
num_total_threads = ndb.IntegerProperty(default=0)
class FeedbackAnalyticsAggregator(jobs.BaseContinuousComputationManager):
"""A continuous-computation job that computes analytics for feedback
threads of explorations.
"""
@classmethod
def get_event_types_listened_to(cls):
return [feconf.EVENT_TYPE_NEW_THREAD_CREATED,
feconf.EVENT_TYPE_THREAD_STATUS_CHANGED]
@classmethod
def _get_realtime_datastore_class(cls):
return FeedbackAnalyticsRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return FeedbackAnalyticsMRJobManager
@classmethod
def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
"""Records thread analytics in the given realtime layer.
Args:
active_realtime_layer: int. The currently active realtime
datastore layer.
event_type: str. The event triggered by the student.
*args: list(*). Variable length argument list. The
first element of *args corresponds to the id
of the exploration currently being played.
"""
exp_id = args[0]
def _increment_open_threads_count():
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
model = realtime_class.get(realtime_model_id, strict=False)
if model is None:
realtime_class(
id=realtime_model_id, num_open_threads=1,
realtime_layer=active_realtime_layer).put()
else:
model.num_open_threads += 1
model.put()
def _increment_total_threads_count():
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
model = realtime_class.get(realtime_model_id, strict=False)
if model is None:
realtime_class(
id=realtime_model_id, num_total_threads=1,
realtime_layer=active_realtime_layer).put()
else:
model.num_total_threads += 1
model.put()
def _decrement_open_threads_count():
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
model = realtime_class.get(realtime_model_id, strict=False)
if model is None:
realtime_class(
id=realtime_model_id, num_open_threads=-1,
realtime_layer=active_realtime_layer).put()
else:
model.num_open_threads -= 1
model.put()
if event_type == feconf.EVENT_TYPE_NEW_THREAD_CREATED:
transaction_services.run_in_transaction(
_increment_total_threads_count)
transaction_services.run_in_transaction(
_increment_open_threads_count)
elif event_type == feconf.EVENT_TYPE_THREAD_STATUS_CHANGED:
old_status = args[1]
updated_status = args[2]
# Status changed from closed to open.
if (old_status != feedback_models.STATUS_CHOICES_OPEN
and updated_status == feedback_models.STATUS_CHOICES_OPEN):
transaction_services.run_in_transaction(
_increment_open_threads_count)
# Status changed from open to closed.
elif (old_status == feedback_models.STATUS_CHOICES_OPEN
and updated_status != feedback_models.STATUS_CHOICES_OPEN):
transaction_services.run_in_transaction(
_decrement_open_threads_count)
# Public query methods.
@classmethod
def get_thread_analytics_multi(cls, exploration_ids):
"""Gets the thread analytics for the explorations specified by the
exploration_ids.
Args:
exploration_ids: list(str). IDs of the explorations to get analytics
for.
Returns:
list(dict). Each dict in this list corresponds to an
exploration ID in the input list, and has two keys:
- num_open_threads: int. The count of open feedback threads
for this exploration.
- num_total_threads: int. The count of all feedback threads
for this exploration.
"""
realtime_model_ids = cls.get_multi_active_realtime_layer_ids(
exploration_ids)
realtime_models = cls._get_realtime_datastore_class().get_multi(
realtime_model_ids)
feedback_thread_analytics_models = (
feedback_models.FeedbackAnalyticsModel.get_multi(exploration_ids))
return [feedback_domain.FeedbackAnalytics(
feconf.ENTITY_TYPE_EXPLORATION, exploration_ids[i],
(realtime_models[i].num_open_threads
if realtime_models[i] is not None else 0) +
(feedback_thread_analytics_models[i].num_open_threads
if feedback_thread_analytics_models[i] is not None else 0),
(realtime_models[i].num_total_threads
if realtime_models[i] is not None else 0) +
(feedback_thread_analytics_models[i].num_total_threads
if feedback_thread_analytics_models[i] is not None else 0)
) for i in range(len(exploration_ids))]
@classmethod
def get_thread_analytics(cls, exploration_id):
"""Retrieves the analytics for feedback threads.
Args:
exploration_id: str. ID of the exploration to get analytics for.
Returns:
dict with two keys:
- num_open_threads: int. The count of open feedback threads for
this exploration.
- num_total_threads: int. The count of all feedback
threads for this exploration.
"""
return FeedbackAnalyticsAggregator.get_thread_analytics_multi(
[exploration_id])[0]
class FeedbackAnalyticsMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
"""Job that creates FeedbackAnalyticsModels for explorations by calculating
various analytics for feedback threads corresponding to an exploration.
Currently, this job calculates the number of open feedback threads, as well
as the total feedback thread count for each exploration.
"""
@classmethod
def _get_continuous_computation_class(cls):
return FeedbackAnalyticsAggregator
@classmethod
def entity_classes_to_map_over(cls):
return [feedback_models.GeneralFeedbackThreadModel]
@staticmethod
def map(item):
"""Map function.
Args:
item: FeedbackThreadModel. A feedback thread model instance.
Yields:
A tuple of two elements:
                - str. The exploration ID associated with the feedback thread.
- str. The feedback thread's status.
"""
if isinstance(item, feedback_models.GeneralFeedbackThreadModel):
yield (item.entity_id, item.status)
else:
yield (item.exploration_id, item.status)
@staticmethod
def reduce(key, stringified_values):
"""Reduce function.
Args:
key: str. The exploration ID.
stringified_values: list(str). List of all statuses from all
mappers tagged with the given key.
"""
num_open_threads = stringified_values.count(
feedback_models.STATUS_CHOICES_OPEN)
num_total_threads = len(stringified_values)
feedback_models.FeedbackAnalyticsModel.create(
key, num_open_threads, num_total_threads)
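# Illustrative usage sketch (not part of the job itself): the figures exposed
# by the aggregator are the realtime layer's running deltas added to the
# batch (MapReduce) totals stored in FeedbackAnalyticsModel, e.g.:
#
#   analytics = FeedbackAnalyticsAggregator.get_thread_analytics(exp_id)
#   analytics.num_open_threads    # realtime delta + batch open-thread count
#   analytics.num_total_threads   # realtime delta + batch total-thread count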
|
py | 7df84a5e30405bff18bb019d1f59c8e243ccaca5 | """Script containing the database class object
"""
from copy import deepcopy
from .relation import Relation
from .view import View
from .table import Table
from .sql import SqlScript
from ..utils.helpers import atmost_one
from ..utils.exceptions import DatabaseInputError
import logging
logger = logging.getLogger(__name__)
class Database(object):
"""Class representing a database
"""
def __init__(self, relations=None, files=None):
"""Constructor for the database class
"""
self._relations = {}
if not atmost_one(relations, files):
raise ValueError('Only one of relations and files should be given')
if files:
relations = self._initialize_relations(files)
if relations:
for relation in relations:
self.add_relation(relation)
def copy(self):
"""Create a copy of the database object
"""
return deepcopy(self)
@staticmethod
def _initialize_relations(files):
"""Read the files and create relations from the files
"""
relations = []
for filename in files:
with open(filename) as f:
script = SqlScript(f.read())
if script.creates_table():
relations.append(Table(script))
elif script.creates_view():
relations.append(View(script))
else:
raise ValueError('File does not create a relation')
return relations
def add_relation(self, relation):
"""Add a relation, only if its name is not already used.
"""
assert isinstance(relation, Relation), 'Input should be a relation'
if relation.full_name in self._relations:
raise ValueError(
'Relation %s already added to database' % relation.full_name)
self._relations[relation.full_name] = relation
def relations(self):
"""Unsorted list of relations of the database
"""
return self._relations.values()
def relation(self, relation_name):
"""Get the relation with the given name
"""
return self._relations.get(relation_name, None)
@property
def num_views(self):
"""The number of views in the database
"""
return len([a for a in self.relations() if isinstance(a, View)])
@property
def num_tables(self):
"""The number of tables in the database
"""
return len([a for a in self.relations() if isinstance(a, Table)])
def has_cycles(self, relation=None, visited=None):
"""Check if the database has no circular dependencies
"""
if visited is None:
visited = list()
if relation:
# Don't include table as own dependency, ignore references not in DB
relations_to_check = [
self.relation(x) for x in relation.dependencies
if x != relation and self.relation(x) is not None]
else:
relations_to_check = self._relations.values()
for relation in relations_to_check:
if relation.full_name in visited:
return True
# Make a copy for immutability
visited_copy = deepcopy(visited)
visited_copy.append(relation.full_name)
if self.has_cycles(relation, visited_copy):
return True
return False
def sorted_relations(self):
"""Topological sort of the relations for dependency management
"""
if self.has_cycles():
logger.warning('Database has cycles')
sorted_relations = []
graph = dict((x.full_name, x.dependencies) for x in self.relations())
# Run until the unsorted graph is empty
while graph:
acyclic = False
# Cast graph.items() to list to avoid the error "dictionary changed size during iteration"
for relation_name, dependencies in list(graph.items()):
for dependency in dependencies:
if dependency in graph:
break
else:
acyclic = True
graph.pop(relation_name)
sorted_relations.append(self.relation(relation_name))
if not acyclic:
raise RuntimeError("A cyclic dependency occurred")
return sorted_relations
def relations_script(self, function_name, **kwargs):
"""SQL Script for all the relations of the database
"""
result = SqlScript()
for relation in self.sorted_relations():
func = getattr(relation, function_name)
result.append(func(**kwargs))
return result
def grant_relations_script(self):
"""SQL Script for granting permissions all the relations of the database
"""
return self.relations_script('grant_script')
def create_relations_script(self, grant_permissions=True):
"""SQL Script for creating all the relations of the database
"""
return self.relations_script(
'create_script', grant_permissions=grant_permissions)
def drop_relations_script(self):
"""SQL Script for dropping all the relations for the database
"""
return self.relations_script('drop_script')
def recreate_relations_script(self, grant_permissions=True):
"""SQL Script for recreating all the relations of the database
"""
return self.relations_script(
'recreate_script', grant_permissions=grant_permissions)
def recreate_table_dependencies(self, table_name, grant_permissions=True):
"""Recreate the dependencies for a particular table from the database
"""
result = SqlScript()
for relation in self.relations():
if relation.full_name == table_name:
                # Skip this table: it cannot be a dependency of itself
continue
if isinstance(relation, Table):
# Recreate foreign key relations
for column_names, ref_name, ref_columns in \
relation.foreign_key_references():
if ref_name == table_name:
result.append(
relation.foreign_key_reference_script(
source_columns=column_names,
reference_name=ref_name,
reference_columns=ref_columns))
if isinstance(relation, View):
# Recreate view if pointing to table
if table_name in relation.dependencies:
result.append(relation.recreate_script(
grant_permissions=grant_permissions))
return result
@staticmethod
def _make_node_label(relation):
"""Create the table layout for graph nodes
"""
columns = list()
row = '<TR><TD ALIGN="left" PORT="{col_name}">{col_name}{pk}</TD></TR>'
for column in sorted(relation.columns(), key=lambda x: x.position):
columns.append(row.format(col_name=column.name,
pk=' (PK)' if column.primary else ''))
layout = ('<<TABLE BORDER="1" CELLBORDER="0" CELLSPACING="0">\n'
'<TR><TD BGCOLOR="lightblue">{table_name}</TD></TR>\n'
'{columns}</TABLE>>').format(table_name=relation.full_name,
columns='\n'.join(columns))
return layout
def visualize(self, filename=None, tables_to_show=None):
"""Visualize databases and create an er-diagram
Args:
filename(str): filepath for saving the er-diagram
tables_to_show(list): A list of tables to actually visualize.
                Tables not included in this list will not be visualized,
                but their foreign keys will be visualized if they refer to
                a table in this list
"""
# Import pygraphviz for plotting the graphs
try:
import pygraphviz
except ImportError:
logger.error('Install pygraphviz for visualizing databases')
raise
if filename is None:
raise DatabaseInputError(
'Filename must be provided for visualization')
logger.info('Creating a visualization of the database')
graph = pygraphviz.AGraph(name='Database', label='Database')
tables = [r for r in self.relations() if isinstance(r, Table)]
if tables_to_show is None:
tables_to_show = [table.full_name for table in tables]
# Add nodes
for table in tables:
if table.full_name in tables_to_show:
graph.add_node(table.full_name, shape='none',
label=self._make_node_label(table))
# Add edges
for table in tables:
for cols, ref_table, ref_cols in table.foreign_key_references():
if table.full_name in tables_to_show or \
ref_table in tables_to_show:
graph.add_edge(
ref_table,
table.full_name,
tailport=ref_cols[0],
headport=cols[0],
dir='both',
arrowhead='crow',
arrowtail='dot',
)
# Plotting the graph with dot layout
graph.layout(prog='dot')
graph.draw(filename)
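# Minimal usage sketch (file names below are hypothetical; each file is
# assumed to contain exactly one CREATE TABLE or CREATE VIEW statement):
#
#   db = Database(files=['schema/customers.sql', 'schema/orders.sql'])
#   print(db.num_tables, db.num_views)
#   create_sql = db.create_relations_script(grant_permissions=False)
#   db.visualize('er_diagram.png')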
|
py | 7df84ab7cf346b0de02596cd8dd1464af66c38e4 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# © 2020 Nokia
# Licensed under the GNU General Public License v3.0 only
# SPDX-License-Identifier: GPL-3.0-only
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: netbox_device_bay_template
short_description: Create, update or delete device bay templates within Netbox
description:
- Creates, updates or removes device bay templates from Netbox
notes:
- Tags should be defined as a YAML list
  - This should be run with connection C(local) and hosts C(localhost)
author:
- Tobias Groß (@toerb)
requirements:
- pynetbox
version_added: '0.3.0'
options:
netbox_url:
description:
- URL of the Netbox instance resolvable by Ansible control host
required: true
type: str
netbox_token:
description:
- The token created within Netbox to authorize API access
required: true
type: str
data:
description:
- Defines the device bay template configuration
suboptions:
device_type:
description:
- The device type the device bay template will be associated to. The device type must be "parent".
required: true
type: raw
name:
description:
- The name of the device bay template
required: true
type: str
type: dict
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
choices: [ absent, present ]
default: present
type: str
query_params:
description:
- This can be used to override the specified values in ALLOWED_QUERY_PARAMS that is defined
- in plugins/module_utils/netbox_utils.py and provides control to users on what may make
- an object unique in their environment.
required: false
type: list
elements: str
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
default: true
type: raw
"""
EXAMPLES = r"""
- name: "Test Netbox modules"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create device bay template within Netbox with only required information
netbox_device_bay_template:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: device bay template One
device_type: Device Type One
state: present
- name: Delete device bay template within netbox
netbox_device_bay_template:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: device bay template One
device_type: Device Type One
state: absent
"""
RETURN = r"""
device_bay_template:
description: Serialized object as created or already existent within Netbox
returned: success (when I(state=present))
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_dcim import (
NetboxDcimModule,
NB_DEVICE_BAY_TEMPLATES,
)
from copy import deepcopy
def main():
"""
Main entry point for module execution
"""
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
device_type=dict(required=True, type="raw"),
name=dict(required=True, type="str"),
),
),
)
)
required_if = [
("state", "present", ["name", "device_type"]),
("state", "absent", ["name", "device_type"]),
]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_device_bay_template = NetboxDcimModule(module, NB_DEVICE_BAY_TEMPLATES)
netbox_device_bay_template.run()
if __name__ == "__main__": # pragma: no cover
main()
|
py | 7df84ad3752e37d67250b4ced89a5d6b7aa7d168 | '''
Copyright <2021> <Thomas Chapman>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
# Restore world coordinate
import rhinoscriptsyntax as rs
def RestoreWorldCoordinate():
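    """Move all geometry back to the stored world origin.

    Assumes a companion export script previously saved the origin easting and
    northing in a text object named "_ORIGIN_TEXT_", together with a reference
    point named "_ORIGIN_POINT_", on a layer called "_ORIGIN_".
    """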
try:
rs.EnableRedraw(False)
        # retrieve northing and easting from text object
obj = rs.ObjectsByName("_ORIGIN_TEXT_")
if obj:
text = rs.TextObjectText(obj)
textList = text.split()
easting = float(textList[1])
northing = float(textList[3])
# create reference coordinates to make vector
orPoint = (easting, northing, 0)
point = rs.PointCoordinates(rs.ObjectsByName("_ORIGIN_POINT_"))
vector = rs.VectorCreate(orPoint, point)
# move all objects back to original origin point
allObj = rs.AllObjects()
rs.MoveObjects(allObj, vector)
# delete coordinate geometry
isCurrent = rs.IsLayerCurrent("_ORIGIN_")
if isCurrent == False:
rs.PurgeLayer("_ORIGIN_")
if isCurrent == True:
defaultCheck = rs.IsLayer("Default")
if defaultCheck == True:
rs.CurrentLayer("Default")
rs.PurgeLayer("_ORIGIN_")
if defaultCheck == False:
rs.AddLayer("Default")
rs.CurrentLayer("Default")
rs.PurgeLayer("_ORIGIN_")
rs.EnableRedraw(True)
except:
print("Failed to execute")
rs.EnableRedraw(True)
return
if __name__ == "__main__":
RestoreWorldCoordinate()
|
py | 7df84be7b09b077d8180918ce888f263979f979b | # Generated by Django 3.2.9 on 2021-11-24 06:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('patients', '0011_auto_20211121_2113'),
]
operations = [
migrations.AddField(
model_name='doctor',
name='address',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='doctor',
name='specialization',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='patient',
name='address',
field=models.CharField(blank=True, max_length=200, null=True),
),
migrations.AddField(
model_name='patient',
name='email',
field=models.EmailField(blank=True, max_length=254, null=True),
),
migrations.AddField(
model_name='patient',
name='occupation',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
py | 7df84bf77958fa06f3ed880b77a73d697a4ae08a | # Requirements:
# pip3 install -U spacy
# python3 -m spacy download de_core_news_sm
import sys
import json
import spacy
from spacy.matcher import Matcher
from spacy.matcher import PhraseMatcher
sp = spacy.load('de_core_news_sm')
with open(sys.argv[1],"r") as f:
input = f.read()
doc = sp(input)
# find articles
# matcher = Matcher(sp.vocab)
# pattern = [{"TAG": "ART"}]
# matcher.add("pronouns", None, pattern)
# matches = matcher(doc)
# for match_id, start, end in matches:
# string_id = sp.vocab.strings[match_id] # Get string representation
# span = doc[start:end] # The matched span
# print(match_id, string_id, start, end, span.text)
# replace articles
pool = {
"des": ["des", "der"],
"die": ["die", "der", "das"],
"der": ["der", "die", "das"],
"das": ["das", "der", "die"],
"dem": ["dem", "den", "der"],
"den": ["den", "dem", "der"],
"ein": ["ein", "eine", "einem", "einer", "eines"],
"eine": ["ein", "eine", "einem", "einer", "eines"],
"einer": ["ein", "eine", "einem", "einer", "eines"],
"einem": ["ein", "eine", "einem", "einer", "eines"],
"einen": ["ein", "eine", "einem", "einer", "eines"],
"eines": ["ein", "eine", "einem", "einer", "eines"]
}
index = {
"in": input,
"out": "",
"pos": []
}
is_bracket = False
i = 0
text = ''
for token in doc:
#print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_)
if token.tag_ == 'ART': # tag=DET
i += 1
text += " <pos-{}>".format(i)
index["pos"].append({
'id': i,
'pos': token.text,
'pool': pool[token.text.lower()]
})
elif token.tag_ == '$,' or token.tag_ == '$.':
text += token.text
elif token.tag_ == '$(':
is_bracket = True
text += ' ' + token.text
elif token.tag_ == '$)':
text += token.text
else:
if is_bracket:
is_bracket = False
text += token.text
else:
text += ' ' + token.text
index["out"] = text
print (json.dumps(index))
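# Example of the emitted JSON structure (illustrative values for the input
# sentence "Der Hund beißt den Mann."):
#
# {
#   "in":  "Der Hund beißt den Mann.",
#   "out": " <pos-1> Hund beißt <pos-2> Mann.",
#   "pos": [
#     {"id": 1, "pos": "Der", "pool": ["der", "die", "das"]},
#     {"id": 2, "pos": "den", "pool": ["den", "dem", "der"]}
#   ]
# }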
#print (json.dumps(index, indent=4)) |
py | 7df84c98b17fcf9356f87b23a29388f0d5efb4b4 | import netaddr
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import F, Q
from django.urls import reverse
from dcim.models import Device
from extras.utils import extras_features
from netbox.models import OrganizationalModel, PrimaryModel
from ipam.choices import *
from ipam.constants import *
from ipam.fields import IPNetworkField, IPAddressField
from ipam.managers import IPAddressManager
from ipam.querysets import PrefixQuerySet
from ipam.validators import DNSValidator
from utilities.querysets import RestrictedQuerySet
from virtualization.models import VirtualMachine
__all__ = (
'Aggregate',
'IPAddress',
'Prefix',
'RIR',
'Role',
)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class RIR(OrganizationalModel):
"""
A Regional Internet Registry (RIR) is responsible for the allocation of a large portion of the global IP address
space. This can be an organization like ARIN or RIPE, or a governing standard such as RFC 1918.
"""
name = models.CharField(
max_length=100,
unique=True
)
slug = models.SlugField(
max_length=100,
unique=True
)
is_private = models.BooleanField(
default=False,
verbose_name='Private',
help_text='IP space managed by this RIR is considered private'
)
description = models.CharField(
max_length=200,
blank=True
)
objects = RestrictedQuerySet.as_manager()
csv_headers = ['name', 'slug', 'is_private', 'description']
class Meta:
ordering = ['name']
verbose_name = 'RIR'
verbose_name_plural = 'RIRs'
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('ipam:rir', args=[self.pk])
def to_csv(self):
return (
self.name,
self.slug,
self.is_private,
self.description,
)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'tags', 'webhooks')
class Aggregate(PrimaryModel):
"""
An aggregate exists at the root level of the IP address space hierarchy in NetBox. Aggregates are used to organize
the hierarchy and track the overall utilization of available address space. Each Aggregate is assigned to a RIR.
"""
prefix = IPNetworkField()
rir = models.ForeignKey(
to='ipam.RIR',
on_delete=models.PROTECT,
related_name='aggregates',
verbose_name='RIR'
)
tenant = models.ForeignKey(
to='tenancy.Tenant',
on_delete=models.PROTECT,
related_name='aggregates',
blank=True,
null=True
)
date_added = models.DateField(
blank=True,
null=True
)
description = models.CharField(
max_length=200,
blank=True
)
objects = RestrictedQuerySet.as_manager()
csv_headers = ['prefix', 'rir', 'tenant', 'date_added', 'description']
clone_fields = [
'rir', 'tenant', 'date_added', 'description',
]
class Meta:
ordering = ('prefix', 'pk') # prefix may be non-unique
def __str__(self):
return str(self.prefix)
def get_absolute_url(self):
return reverse('ipam:aggregate', args=[self.pk])
def clean(self):
super().clean()
if self.prefix:
# Clear host bits from prefix
self.prefix = self.prefix.cidr
# /0 masks are not acceptable
if self.prefix.prefixlen == 0:
raise ValidationError({
'prefix': "Cannot create aggregate with /0 mask."
})
# Ensure that the aggregate being added is not covered by an existing aggregate
covering_aggregates = Aggregate.objects.filter(
prefix__net_contains_or_equals=str(self.prefix)
)
if self.pk:
covering_aggregates = covering_aggregates.exclude(pk=self.pk)
if covering_aggregates:
raise ValidationError({
'prefix': "Aggregates cannot overlap. {} is already covered by an existing aggregate ({}).".format(
self.prefix, covering_aggregates[0]
)
})
# Ensure that the aggregate being added does not cover an existing aggregate
covered_aggregates = Aggregate.objects.filter(prefix__net_contained=str(self.prefix))
if self.pk:
covered_aggregates = covered_aggregates.exclude(pk=self.pk)
if covered_aggregates:
raise ValidationError({
'prefix': "Aggregates cannot overlap. {} covers an existing aggregate ({}).".format(
self.prefix, covered_aggregates[0]
)
})
def to_csv(self):
return (
self.prefix,
self.rir.name,
self.tenant.name if self.tenant else None,
self.date_added,
self.description,
)
@property
def family(self):
if self.prefix:
return self.prefix.version
return None
def get_utilization(self):
"""
Determine the prefix utilization of the aggregate and return it as a percentage.
"""
queryset = Prefix.objects.filter(prefix__net_contained_or_equal=str(self.prefix))
child_prefixes = netaddr.IPSet([p.prefix for p in queryset])
return int(float(child_prefixes.size) / self.prefix.size * 100)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks')
class Role(OrganizationalModel):
"""
A Role represents the functional role of a Prefix or VLAN; for example, "Customer," "Infrastructure," or
"Management."
"""
name = models.CharField(
max_length=100,
unique=True
)
slug = models.SlugField(
max_length=100,
unique=True
)
weight = models.PositiveSmallIntegerField(
default=1000
)
description = models.CharField(
max_length=200,
blank=True,
)
objects = RestrictedQuerySet.as_manager()
csv_headers = ['name', 'slug', 'weight', 'description']
class Meta:
ordering = ['weight', 'name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('ipam:role', args=[self.pk])
def to_csv(self):
return (
self.name,
self.slug,
self.weight,
self.description,
)
@extras_features('custom_fields', 'custom_links', 'export_templates', 'tags', 'webhooks')
class Prefix(PrimaryModel):
"""
A Prefix represents an IPv4 or IPv6 network, including mask length. Prefixes can optionally be assigned to Sites and
    VRFs. A Prefix must be assigned a status and may optionally be assigned a user-defined Role. A Prefix can also be
assigned to a VLAN where appropriate.
"""
prefix = IPNetworkField(
help_text='IPv4 or IPv6 network with mask'
)
site = models.ForeignKey(
to='dcim.Site',
on_delete=models.PROTECT,
related_name='prefixes',
blank=True,
null=True
)
vrf = models.ForeignKey(
to='ipam.VRF',
on_delete=models.PROTECT,
related_name='prefixes',
blank=True,
null=True,
verbose_name='VRF'
)
tenant = models.ForeignKey(
to='tenancy.Tenant',
on_delete=models.PROTECT,
related_name='prefixes',
blank=True,
null=True
)
vlan = models.ForeignKey(
to='ipam.VLAN',
on_delete=models.PROTECT,
related_name='prefixes',
blank=True,
null=True,
verbose_name='VLAN'
)
status = models.CharField(
max_length=50,
choices=PrefixStatusChoices,
default=PrefixStatusChoices.STATUS_ACTIVE,
verbose_name='Status',
help_text='Operational status of this prefix'
)
role = models.ForeignKey(
to='ipam.Role',
on_delete=models.SET_NULL,
related_name='prefixes',
blank=True,
null=True,
help_text='The primary function of this prefix'
)
is_pool = models.BooleanField(
verbose_name='Is a pool',
default=False,
help_text='All IP addresses within this prefix are considered usable'
)
description = models.CharField(
max_length=200,
blank=True
)
objects = PrefixQuerySet.as_manager()
csv_headers = [
'prefix', 'vrf', 'tenant', 'site', 'vlan_group', 'vlan', 'status', 'role', 'is_pool', 'description',
]
clone_fields = [
'site', 'vrf', 'tenant', 'vlan', 'status', 'role', 'is_pool', 'description',
]
class Meta:
ordering = (F('vrf').asc(nulls_first=True), 'prefix', 'pk') # (vrf, prefix) may be non-unique
verbose_name_plural = 'prefixes'
def __str__(self):
return str(self.prefix)
def get_absolute_url(self):
return reverse('ipam:prefix', args=[self.pk])
def clean(self):
super().clean()
if self.prefix:
# /0 masks are not acceptable
if self.prefix.prefixlen == 0:
raise ValidationError({
'prefix': "Cannot create prefix with /0 mask."
})
# Disallow host masks
if self.prefix.version == 4 and self.prefix.prefixlen == 32:
raise ValidationError({
'prefix': "Cannot create host addresses (/32) as prefixes. Create an IPv4 address instead."
})
elif self.prefix.version == 6 and self.prefix.prefixlen == 128:
raise ValidationError({
'prefix': "Cannot create host addresses (/128) as prefixes. Create an IPv6 address instead."
})
# Enforce unique IP space (if applicable)
if (self.vrf is None and settings.ENFORCE_GLOBAL_UNIQUE) or (self.vrf and self.vrf.enforce_unique):
duplicate_prefixes = self.get_duplicates()
if duplicate_prefixes:
raise ValidationError({
'prefix': "Duplicate prefix found in {}: {}".format(
"VRF {}".format(self.vrf) if self.vrf else "global table",
duplicate_prefixes.first(),
)
})
def save(self, *args, **kwargs):
if isinstance(self.prefix, netaddr.IPNetwork):
# Clear host bits from prefix
self.prefix = self.prefix.cidr
super().save(*args, **kwargs)
def to_csv(self):
return (
self.prefix,
self.vrf.name if self.vrf else None,
self.tenant.name if self.tenant else None,
self.site.name if self.site else None,
self.vlan.group.name if self.vlan and self.vlan.group else None,
self.vlan.vid if self.vlan else None,
self.get_status_display(),
self.role.name if self.role else None,
self.is_pool,
self.description,
)
@property
def family(self):
if self.prefix:
return self.prefix.version
return None
def _set_prefix_length(self, value):
"""
Expose the IPNetwork object's prefixlen attribute on the parent model so that it can be manipulated directly,
e.g. for bulk editing.
"""
if self.prefix is not None:
self.prefix.prefixlen = value
prefix_length = property(fset=_set_prefix_length)
def get_status_class(self):
return PrefixStatusChoices.CSS_CLASSES.get(self.status)
def get_duplicates(self):
return Prefix.objects.filter(vrf=self.vrf, prefix=str(self.prefix)).exclude(pk=self.pk)
def get_child_prefixes(self):
"""
Return all Prefixes within this Prefix and VRF. If this Prefix is a container in the global table, return child
Prefixes belonging to any VRF.
"""
if self.vrf is None and self.status == PrefixStatusChoices.STATUS_CONTAINER:
return Prefix.objects.filter(prefix__net_contained=str(self.prefix))
else:
return Prefix.objects.filter(prefix__net_contained=str(self.prefix), vrf=self.vrf)
def get_child_ips(self):
"""
Return all IPAddresses within this Prefix and VRF. If this Prefix is a container in the global table, return
child IPAddresses belonging to any VRF.
"""
if self.vrf is None and self.status == PrefixStatusChoices.STATUS_CONTAINER:
return IPAddress.objects.filter(address__net_host_contained=str(self.prefix))
else:
return IPAddress.objects.filter(address__net_host_contained=str(self.prefix), vrf=self.vrf)
def get_available_prefixes(self):
"""
Return all available Prefixes within this prefix as an IPSet.
"""
prefix = netaddr.IPSet(self.prefix)
child_prefixes = netaddr.IPSet([child.prefix for child in self.get_child_prefixes()])
available_prefixes = prefix - child_prefixes
return available_prefixes
def get_available_ips(self):
"""
Return all available IPs within this prefix as an IPSet.
"""
prefix = netaddr.IPSet(self.prefix)
child_ips = netaddr.IPSet([ip.address.ip for ip in self.get_child_ips()])
available_ips = prefix - child_ips
# IPv6, pool, or IPv4 /31 sets are fully usable
if self.family == 6 or self.is_pool or self.prefix.prefixlen == 31:
return available_ips
# For "normal" IPv4 prefixes, omit first and last addresses
available_ips -= netaddr.IPSet([
netaddr.IPAddress(self.prefix.first),
netaddr.IPAddress(self.prefix.last),
])
return available_ips
def get_first_available_prefix(self):
"""
Return the first available child prefix within the prefix (or None).
"""
available_prefixes = self.get_available_prefixes()
if not available_prefixes:
return None
return available_prefixes.iter_cidrs()[0]
def get_first_available_ip(self):
"""
Return the first available IP within the prefix (or None).
"""
available_ips = self.get_available_ips()
if not available_ips:
return None
return '{}/{}'.format(next(available_ips.__iter__()), self.prefix.prefixlen)
def get_utilization(self):
"""
Determine the utilization of the prefix and return it as a percentage. For Prefixes with a status of
"container", calculate utilization based on child prefixes. For all others, count child IP addresses.
"""
if self.status == PrefixStatusChoices.STATUS_CONTAINER:
queryset = Prefix.objects.filter(
prefix__net_contained=str(self.prefix),
vrf=self.vrf
)
child_prefixes = netaddr.IPSet([p.prefix for p in queryset])
return int(float(child_prefixes.size) / self.prefix.size * 100)
else:
# Compile an IPSet to avoid counting duplicate IPs
child_count = netaddr.IPSet([ip.address.ip for ip in self.get_child_ips()]).size
prefix_size = self.prefix.size
if self.prefix.version == 4 and self.prefix.prefixlen < 31 and not self.is_pool:
prefix_size -= 2
return int(float(child_count) / prefix_size * 100)
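# Illustrative sketch of the netaddr.IPSet arithmetic used by the Prefix
# helpers above (plain netaddr, independent of any NetBox models): for
# 10.0.0.0/30 with no child IPs and is_pool=False, the network and broadcast
# addresses are subtracted, leaving 10.0.0.1 and 10.0.0.2 as usable:
#
#   import netaddr
#   prefix = netaddr.IPNetwork('10.0.0.0/30')
#   available = netaddr.IPSet(prefix) - netaddr.IPSet([
#       netaddr.IPAddress(prefix.first),   # 10.0.0.0 (network address)
#       netaddr.IPAddress(prefix.last),    # 10.0.0.3 (broadcast address)
#   ])
#   # available now contains 10.0.0.1 and 10.0.0.2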
@extras_features('custom_fields', 'custom_links', 'export_templates', 'tags', 'webhooks')
class IPAddress(PrimaryModel):
"""
An IPAddress represents an individual IPv4 or IPv6 address and its mask. The mask length should match what is
configured in the real world. (Typically, only loopback interfaces are configured with /32 or /128 masks.) Like
Prefixes, IPAddresses can optionally be assigned to a VRF. An IPAddress can optionally be assigned to an Interface.
Interfaces can have zero or more IPAddresses assigned to them.
An IPAddress can also optionally point to a NAT inside IP, designating itself as a NAT outside IP. This is useful,
for example, when mapping public addresses to private addresses. When an Interface has been assigned an IPAddress
which has a NAT outside IP, that Interface's Device can use either the inside or outside IP as its primary IP.
"""
address = IPAddressField(
help_text='IPv4 or IPv6 address (with mask)'
)
vrf = models.ForeignKey(
to='ipam.VRF',
on_delete=models.PROTECT,
related_name='ip_addresses',
blank=True,
null=True,
verbose_name='VRF'
)
tenant = models.ForeignKey(
to='tenancy.Tenant',
on_delete=models.PROTECT,
related_name='ip_addresses',
blank=True,
null=True
)
status = models.CharField(
max_length=50,
choices=IPAddressStatusChoices,
default=IPAddressStatusChoices.STATUS_ACTIVE,
help_text='The operational status of this IP'
)
role = models.CharField(
max_length=50,
choices=IPAddressRoleChoices,
blank=True,
help_text='The functional role of this IP'
)
assigned_object_type = models.ForeignKey(
to=ContentType,
limit_choices_to=IPADDRESS_ASSIGNMENT_MODELS,
on_delete=models.PROTECT,
related_name='+',
blank=True,
null=True
)
assigned_object_id = models.PositiveIntegerField(
blank=True,
null=True
)
assigned_object = GenericForeignKey(
ct_field='assigned_object_type',
fk_field='assigned_object_id'
)
nat_inside = models.OneToOneField(
to='self',
on_delete=models.SET_NULL,
related_name='nat_outside',
blank=True,
null=True,
verbose_name='NAT (Inside)',
help_text='The IP for which this address is the "outside" IP'
)
dns_name = models.CharField(
max_length=255,
blank=True,
validators=[DNSValidator],
verbose_name='DNS Name',
help_text='Hostname or FQDN (not case-sensitive)'
)
description = models.CharField(
max_length=200,
blank=True
)
objects = IPAddressManager()
csv_headers = [
'address', 'vrf', 'tenant', 'status', 'role', 'assigned_object_type', 'assigned_object_id', 'is_primary',
'dns_name', 'description',
]
clone_fields = [
'vrf', 'tenant', 'status', 'role', 'description',
]
class Meta:
ordering = ('address', 'pk') # address may be non-unique
verbose_name = 'IP address'
verbose_name_plural = 'IP addresses'
def __str__(self):
return str(self.address)
def get_absolute_url(self):
return reverse('ipam:ipaddress', args=[self.pk])
def get_duplicates(self):
return IPAddress.objects.filter(
vrf=self.vrf,
address__net_host=str(self.address.ip)
).exclude(pk=self.pk)
def clean(self):
super().clean()
if self.address:
# /0 masks are not acceptable
if self.address.prefixlen == 0:
raise ValidationError({
'address': "Cannot create IP address with /0 mask."
})
# Enforce unique IP space (if applicable)
if (self.vrf is None and settings.ENFORCE_GLOBAL_UNIQUE) or (self.vrf and self.vrf.enforce_unique):
duplicate_ips = self.get_duplicates()
if duplicate_ips and (
self.role not in IPADDRESS_ROLES_NONUNIQUE or
any(dip.role not in IPADDRESS_ROLES_NONUNIQUE for dip in duplicate_ips)
):
raise ValidationError({
'address': "Duplicate IP address found in {}: {}".format(
"VRF {}".format(self.vrf) if self.vrf else "global table",
duplicate_ips.first(),
)
})
# Check for primary IP assignment that doesn't match the assigned device/VM
if self.pk:
device = Device.objects.filter(Q(primary_ip4=self) | Q(primary_ip6=self)).first()
if device:
if getattr(self.assigned_object, 'device', None) != device:
raise ValidationError({
'interface': f"IP address is primary for device {device} but not assigned to it!"
})
vm = VirtualMachine.objects.filter(Q(primary_ip4=self) | Q(primary_ip6=self)).first()
if vm:
if getattr(self.assigned_object, 'virtual_machine', None) != vm:
raise ValidationError({
'vminterface': f"IP address is primary for virtual machine {vm} but not assigned to it!"
})
# Validate IP status selection
if self.status == IPAddressStatusChoices.STATUS_SLAAC and self.family != 6:
raise ValidationError({
'status': "Only IPv6 addresses can be assigned SLAAC status"
})
def save(self, *args, **kwargs):
# Force dns_name to lowercase
self.dns_name = self.dns_name.lower()
super().save(*args, **kwargs)
def to_objectchange(self, action):
# Annotate the assigned object, if any
return super().to_objectchange(action, related_object=self.assigned_object)
def to_csv(self):
# Determine if this IP is primary for a Device
is_primary = False
if self.address.version == 4 and getattr(self, 'primary_ip4_for', False):
is_primary = True
elif self.address.version == 6 and getattr(self, 'primary_ip6_for', False):
is_primary = True
obj_type = None
if self.assigned_object_type:
obj_type = f'{self.assigned_object_type.app_label}.{self.assigned_object_type.model}'
return (
self.address,
self.vrf.name if self.vrf else None,
self.tenant.name if self.tenant else None,
self.get_status_display(),
self.get_role_display(),
obj_type,
self.assigned_object_id,
is_primary,
self.dns_name,
self.description,
)
@property
def family(self):
if self.address:
return self.address.version
return None
def _set_mask_length(self, value):
"""
Expose the IPNetwork object's prefixlen attribute on the parent model so that it can be manipulated directly,
e.g. for bulk editing.
"""
if self.address is not None:
self.address.prefixlen = value
mask_length = property(fset=_set_mask_length)
def get_status_class(self):
return IPAddressStatusChoices.CSS_CLASSES.get(self.status)
def get_role_class(self):
return IPAddressRoleChoices.CSS_CLASSES.get(self.role)
|
py | 7df84cc5c37048392c05da65f797a0aac73d29d6 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for http://developer.download.nvidia.com/opengl/includes/wglext.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: gengl.py 601 2007-02-04 05:36:59Z Alex.Holkner $'
from ctypes import *
from pyglet.gl.lib import link_WGL as _link_function
from pyglet.gl.lib import c_ptrdiff_t, c_void
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://developer.download.nvidia.com/opengl/includes/wglext.h
# H (C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:7)
# H (C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:7)
WIN32_LEAN_AND_MEAN = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:40
WGL_WGLEXT_VERSION = 6 # http://developer.download.nvidia.com/opengl/includes/wglext.h:60
# ARB_buffer_region (http://developer.download.nvidia.com/opengl/includes/wglext.h:62)
WGL_FRONT_COLOR_BUFFER_BIT_ARB = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:63
WGL_BACK_COLOR_BUFFER_BIT_ARB = 2 # http://developer.download.nvidia.com/opengl/includes/wglext.h:64
WGL_DEPTH_BUFFER_BIT_ARB = 4 # http://developer.download.nvidia.com/opengl/includes/wglext.h:65
WGL_STENCIL_BUFFER_BIT_ARB = 8 # http://developer.download.nvidia.com/opengl/includes/wglext.h:66
# ARB_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:69)
WGL_SAMPLE_BUFFERS_ARB = 8257 # http://developer.download.nvidia.com/opengl/includes/wglext.h:70
WGL_SAMPLES_ARB = 8258 # http://developer.download.nvidia.com/opengl/includes/wglext.h:71
# ARB_extensions_string (http://developer.download.nvidia.com/opengl/includes/wglext.h:74)
# ARB_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:77)
WGL_NUMBER_PIXEL_FORMATS_ARB = 8192 # http://developer.download.nvidia.com/opengl/includes/wglext.h:78
WGL_DRAW_TO_WINDOW_ARB = 8193 # http://developer.download.nvidia.com/opengl/includes/wglext.h:79
WGL_DRAW_TO_BITMAP_ARB = 8194 # http://developer.download.nvidia.com/opengl/includes/wglext.h:80
WGL_ACCELERATION_ARB = 8195 # http://developer.download.nvidia.com/opengl/includes/wglext.h:81
WGL_NEED_PALETTE_ARB = 8196 # http://developer.download.nvidia.com/opengl/includes/wglext.h:82
WGL_NEED_SYSTEM_PALETTE_ARB = 8197 # http://developer.download.nvidia.com/opengl/includes/wglext.h:83
WGL_SWAP_LAYER_BUFFERS_ARB = 8198 # http://developer.download.nvidia.com/opengl/includes/wglext.h:84
WGL_SWAP_METHOD_ARB = 8199 # http://developer.download.nvidia.com/opengl/includes/wglext.h:85
WGL_NUMBER_OVERLAYS_ARB = 8200 # http://developer.download.nvidia.com/opengl/includes/wglext.h:86
WGL_NUMBER_UNDERLAYS_ARB = 8201 # http://developer.download.nvidia.com/opengl/includes/wglext.h:87
WGL_TRANSPARENT_ARB = 8202 # http://developer.download.nvidia.com/opengl/includes/wglext.h:88
WGL_TRANSPARENT_RED_VALUE_ARB = 8247 # http://developer.download.nvidia.com/opengl/includes/wglext.h:89
WGL_TRANSPARENT_GREEN_VALUE_ARB = 8248 # http://developer.download.nvidia.com/opengl/includes/wglext.h:90
WGL_TRANSPARENT_BLUE_VALUE_ARB = 8249 # http://developer.download.nvidia.com/opengl/includes/wglext.h:91
WGL_TRANSPARENT_ALPHA_VALUE_ARB = 8250 # http://developer.download.nvidia.com/opengl/includes/wglext.h:92
WGL_TRANSPARENT_INDEX_VALUE_ARB = 8251 # http://developer.download.nvidia.com/opengl/includes/wglext.h:93
WGL_SHARE_DEPTH_ARB = 8204 # http://developer.download.nvidia.com/opengl/includes/wglext.h:94
WGL_SHARE_STENCIL_ARB = 8205 # http://developer.download.nvidia.com/opengl/includes/wglext.h:95
WGL_SHARE_ACCUM_ARB = 8206 # http://developer.download.nvidia.com/opengl/includes/wglext.h:96
WGL_SUPPORT_GDI_ARB = 8207 # http://developer.download.nvidia.com/opengl/includes/wglext.h:97
WGL_SUPPORT_OPENGL_ARB = 8208 # http://developer.download.nvidia.com/opengl/includes/wglext.h:98
WGL_DOUBLE_BUFFER_ARB = 8209 # http://developer.download.nvidia.com/opengl/includes/wglext.h:99
WGL_STEREO_ARB = 8210 # http://developer.download.nvidia.com/opengl/includes/wglext.h:100
WGL_PIXEL_TYPE_ARB = 8211 # http://developer.download.nvidia.com/opengl/includes/wglext.h:101
WGL_COLOR_BITS_ARB = 8212 # http://developer.download.nvidia.com/opengl/includes/wglext.h:102
WGL_RED_BITS_ARB = 8213 # http://developer.download.nvidia.com/opengl/includes/wglext.h:103
WGL_RED_SHIFT_ARB = 8214 # http://developer.download.nvidia.com/opengl/includes/wglext.h:104
WGL_GREEN_BITS_ARB = 8215 # http://developer.download.nvidia.com/opengl/includes/wglext.h:105
WGL_GREEN_SHIFT_ARB = 8216 # http://developer.download.nvidia.com/opengl/includes/wglext.h:106
WGL_BLUE_BITS_ARB = 8217 # http://developer.download.nvidia.com/opengl/includes/wglext.h:107
WGL_BLUE_SHIFT_ARB = 8218 # http://developer.download.nvidia.com/opengl/includes/wglext.h:108
WGL_ALPHA_BITS_ARB = 8219 # http://developer.download.nvidia.com/opengl/includes/wglext.h:109
WGL_ALPHA_SHIFT_ARB = 8220 # http://developer.download.nvidia.com/opengl/includes/wglext.h:110
WGL_ACCUM_BITS_ARB = 8221 # http://developer.download.nvidia.com/opengl/includes/wglext.h:111
WGL_ACCUM_RED_BITS_ARB = 8222 # http://developer.download.nvidia.com/opengl/includes/wglext.h:112
WGL_ACCUM_GREEN_BITS_ARB = 8223 # http://developer.download.nvidia.com/opengl/includes/wglext.h:113
WGL_ACCUM_BLUE_BITS_ARB = 8224 # http://developer.download.nvidia.com/opengl/includes/wglext.h:114
WGL_ACCUM_ALPHA_BITS_ARB = 8225 # http://developer.download.nvidia.com/opengl/includes/wglext.h:115
WGL_DEPTH_BITS_ARB = 8226 # http://developer.download.nvidia.com/opengl/includes/wglext.h:116
WGL_STENCIL_BITS_ARB = 8227 # http://developer.download.nvidia.com/opengl/includes/wglext.h:117
WGL_AUX_BUFFERS_ARB = 8228 # http://developer.download.nvidia.com/opengl/includes/wglext.h:118
WGL_NO_ACCELERATION_ARB = 8229 # http://developer.download.nvidia.com/opengl/includes/wglext.h:119
WGL_GENERIC_ACCELERATION_ARB = 8230 # http://developer.download.nvidia.com/opengl/includes/wglext.h:120
WGL_FULL_ACCELERATION_ARB = 8231 # http://developer.download.nvidia.com/opengl/includes/wglext.h:121
WGL_SWAP_EXCHANGE_ARB = 8232 # http://developer.download.nvidia.com/opengl/includes/wglext.h:122
WGL_SWAP_COPY_ARB = 8233 # http://developer.download.nvidia.com/opengl/includes/wglext.h:123
WGL_SWAP_UNDEFINED_ARB = 8234 # http://developer.download.nvidia.com/opengl/includes/wglext.h:124
WGL_TYPE_RGBA_ARB = 8235 # http://developer.download.nvidia.com/opengl/includes/wglext.h:125
WGL_TYPE_COLORINDEX_ARB = 8236 # http://developer.download.nvidia.com/opengl/includes/wglext.h:126
# ARB_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:129)
ERROR_INVALID_PIXEL_TYPE_ARB = 8259 # http://developer.download.nvidia.com/opengl/includes/wglext.h:130
ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB = 8276 # http://developer.download.nvidia.com/opengl/includes/wglext.h:131
# ARB_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:134)
WGL_DRAW_TO_PBUFFER_ARB = 8237 # http://developer.download.nvidia.com/opengl/includes/wglext.h:135
WGL_MAX_PBUFFER_PIXELS_ARB = 8238 # http://developer.download.nvidia.com/opengl/includes/wglext.h:136
WGL_MAX_PBUFFER_WIDTH_ARB = 8239 # http://developer.download.nvidia.com/opengl/includes/wglext.h:137
WGL_MAX_PBUFFER_HEIGHT_ARB = 8240 # http://developer.download.nvidia.com/opengl/includes/wglext.h:138
WGL_PBUFFER_LARGEST_ARB = 8243 # http://developer.download.nvidia.com/opengl/includes/wglext.h:139
WGL_PBUFFER_WIDTH_ARB = 8244 # http://developer.download.nvidia.com/opengl/includes/wglext.h:140
WGL_PBUFFER_HEIGHT_ARB = 8245 # http://developer.download.nvidia.com/opengl/includes/wglext.h:141
WGL_PBUFFER_LOST_ARB = 8246 # http://developer.download.nvidia.com/opengl/includes/wglext.h:142
# ARB_render_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:145)
WGL_BIND_TO_TEXTURE_RGB_ARB = 8304 # http://developer.download.nvidia.com/opengl/includes/wglext.h:146
WGL_BIND_TO_TEXTURE_RGBA_ARB = 8305 # http://developer.download.nvidia.com/opengl/includes/wglext.h:147
WGL_TEXTURE_FORMAT_ARB = 8306 # http://developer.download.nvidia.com/opengl/includes/wglext.h:148
WGL_TEXTURE_TARGET_ARB = 8307 # http://developer.download.nvidia.com/opengl/includes/wglext.h:149
WGL_MIPMAP_TEXTURE_ARB = 8308 # http://developer.download.nvidia.com/opengl/includes/wglext.h:150
WGL_TEXTURE_RGB_ARB = 8309 # http://developer.download.nvidia.com/opengl/includes/wglext.h:151
WGL_TEXTURE_RGBA_ARB = 8310 # http://developer.download.nvidia.com/opengl/includes/wglext.h:152
WGL_NO_TEXTURE_ARB = 8311 # http://developer.download.nvidia.com/opengl/includes/wglext.h:153
WGL_TEXTURE_CUBE_MAP_ARB = 8312 # http://developer.download.nvidia.com/opengl/includes/wglext.h:154
WGL_TEXTURE_1D_ARB = 8313 # http://developer.download.nvidia.com/opengl/includes/wglext.h:155
WGL_TEXTURE_2D_ARB = 8314 # http://developer.download.nvidia.com/opengl/includes/wglext.h:156
WGL_MIPMAP_LEVEL_ARB = 8315 # http://developer.download.nvidia.com/opengl/includes/wglext.h:157
WGL_CUBE_MAP_FACE_ARB = 8316 # http://developer.download.nvidia.com/opengl/includes/wglext.h:158
WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB = 8317 # http://developer.download.nvidia.com/opengl/includes/wglext.h:159
WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB = 8318 # http://developer.download.nvidia.com/opengl/includes/wglext.h:160
WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB = 8319 # http://developer.download.nvidia.com/opengl/includes/wglext.h:161
WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB = 8320 # http://developer.download.nvidia.com/opengl/includes/wglext.h:162
WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB = 8321 # http://developer.download.nvidia.com/opengl/includes/wglext.h:163
WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB = 8322 # http://developer.download.nvidia.com/opengl/includes/wglext.h:164
WGL_FRONT_LEFT_ARB = 8323 # http://developer.download.nvidia.com/opengl/includes/wglext.h:165
WGL_FRONT_RIGHT_ARB = 8324 # http://developer.download.nvidia.com/opengl/includes/wglext.h:166
WGL_BACK_LEFT_ARB = 8325 # http://developer.download.nvidia.com/opengl/includes/wglext.h:167
WGL_BACK_RIGHT_ARB = 8326 # http://developer.download.nvidia.com/opengl/includes/wglext.h:168
WGL_AUX0_ARB = 8327 # http://developer.download.nvidia.com/opengl/includes/wglext.h:169
WGL_AUX1_ARB = 8328 # http://developer.download.nvidia.com/opengl/includes/wglext.h:170
WGL_AUX2_ARB = 8329 # http://developer.download.nvidia.com/opengl/includes/wglext.h:171
WGL_AUX3_ARB = 8330 # http://developer.download.nvidia.com/opengl/includes/wglext.h:172
WGL_AUX4_ARB = 8331 # http://developer.download.nvidia.com/opengl/includes/wglext.h:173
WGL_AUX5_ARB = 8332 # http://developer.download.nvidia.com/opengl/includes/wglext.h:174
WGL_AUX6_ARB = 8333 # http://developer.download.nvidia.com/opengl/includes/wglext.h:175
WGL_AUX7_ARB = 8334 # http://developer.download.nvidia.com/opengl/includes/wglext.h:176
WGL_AUX8_ARB = 8335 # http://developer.download.nvidia.com/opengl/includes/wglext.h:177
WGL_AUX9_ARB = 8336 # http://developer.download.nvidia.com/opengl/includes/wglext.h:178
# ARB_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:181)
WGL_TYPE_RGBA_FLOAT_ARB = 8608 # http://developer.download.nvidia.com/opengl/includes/wglext.h:182
# EXT_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:185)
ERROR_INVALID_PIXEL_TYPE_EXT = 8259 # http://developer.download.nvidia.com/opengl/includes/wglext.h:186
# EXT_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:189)
WGL_NUMBER_PIXEL_FORMATS_EXT = 8192 # http://developer.download.nvidia.com/opengl/includes/wglext.h:190
WGL_DRAW_TO_WINDOW_EXT = 8193 # http://developer.download.nvidia.com/opengl/includes/wglext.h:191
WGL_DRAW_TO_BITMAP_EXT = 8194 # http://developer.download.nvidia.com/opengl/includes/wglext.h:192
WGL_ACCELERATION_EXT = 8195 # http://developer.download.nvidia.com/opengl/includes/wglext.h:193
WGL_NEED_PALETTE_EXT = 8196 # http://developer.download.nvidia.com/opengl/includes/wglext.h:194
WGL_NEED_SYSTEM_PALETTE_EXT = 8197 # http://developer.download.nvidia.com/opengl/includes/wglext.h:195
WGL_SWAP_LAYER_BUFFERS_EXT = 8198 # http://developer.download.nvidia.com/opengl/includes/wglext.h:196
WGL_SWAP_METHOD_EXT = 8199 # http://developer.download.nvidia.com/opengl/includes/wglext.h:197
WGL_NUMBER_OVERLAYS_EXT = 8200 # http://developer.download.nvidia.com/opengl/includes/wglext.h:198
WGL_NUMBER_UNDERLAYS_EXT = 8201 # http://developer.download.nvidia.com/opengl/includes/wglext.h:199
WGL_TRANSPARENT_EXT = 8202 # http://developer.download.nvidia.com/opengl/includes/wglext.h:200
WGL_TRANSPARENT_VALUE_EXT = 8203 # http://developer.download.nvidia.com/opengl/includes/wglext.h:201
WGL_SHARE_DEPTH_EXT = 8204 # http://developer.download.nvidia.com/opengl/includes/wglext.h:202
WGL_SHARE_STENCIL_EXT = 8205 # http://developer.download.nvidia.com/opengl/includes/wglext.h:203
WGL_SHARE_ACCUM_EXT = 8206 # http://developer.download.nvidia.com/opengl/includes/wglext.h:204
WGL_SUPPORT_GDI_EXT = 8207 # http://developer.download.nvidia.com/opengl/includes/wglext.h:205
WGL_SUPPORT_OPENGL_EXT = 8208 # http://developer.download.nvidia.com/opengl/includes/wglext.h:206
WGL_DOUBLE_BUFFER_EXT = 8209 # http://developer.download.nvidia.com/opengl/includes/wglext.h:207
WGL_STEREO_EXT = 8210 # http://developer.download.nvidia.com/opengl/includes/wglext.h:208
WGL_PIXEL_TYPE_EXT = 8211 # http://developer.download.nvidia.com/opengl/includes/wglext.h:209
WGL_COLOR_BITS_EXT = 8212 # http://developer.download.nvidia.com/opengl/includes/wglext.h:210
WGL_RED_BITS_EXT = 8213 # http://developer.download.nvidia.com/opengl/includes/wglext.h:211
WGL_RED_SHIFT_EXT = 8214 # http://developer.download.nvidia.com/opengl/includes/wglext.h:212
WGL_GREEN_BITS_EXT = 8215 # http://developer.download.nvidia.com/opengl/includes/wglext.h:213
WGL_GREEN_SHIFT_EXT = 8216 # http://developer.download.nvidia.com/opengl/includes/wglext.h:214
WGL_BLUE_BITS_EXT = 8217 # http://developer.download.nvidia.com/opengl/includes/wglext.h:215
WGL_BLUE_SHIFT_EXT = 8218 # http://developer.download.nvidia.com/opengl/includes/wglext.h:216
WGL_ALPHA_BITS_EXT = 8219 # http://developer.download.nvidia.com/opengl/includes/wglext.h:217
WGL_ALPHA_SHIFT_EXT = 8220 # http://developer.download.nvidia.com/opengl/includes/wglext.h:218
WGL_ACCUM_BITS_EXT = 8221 # http://developer.download.nvidia.com/opengl/includes/wglext.h:219
WGL_ACCUM_RED_BITS_EXT = 8222 # http://developer.download.nvidia.com/opengl/includes/wglext.h:220
WGL_ACCUM_GREEN_BITS_EXT = 8223 # http://developer.download.nvidia.com/opengl/includes/wglext.h:221
WGL_ACCUM_BLUE_BITS_EXT = 8224 # http://developer.download.nvidia.com/opengl/includes/wglext.h:222
WGL_ACCUM_ALPHA_BITS_EXT = 8225 # http://developer.download.nvidia.com/opengl/includes/wglext.h:223
WGL_DEPTH_BITS_EXT = 8226 # http://developer.download.nvidia.com/opengl/includes/wglext.h:224
WGL_STENCIL_BITS_EXT = 8227 # http://developer.download.nvidia.com/opengl/includes/wglext.h:225
WGL_AUX_BUFFERS_EXT = 8228 # http://developer.download.nvidia.com/opengl/includes/wglext.h:226
WGL_NO_ACCELERATION_EXT = 8229 # http://developer.download.nvidia.com/opengl/includes/wglext.h:227
WGL_GENERIC_ACCELERATION_EXT = 8230 # http://developer.download.nvidia.com/opengl/includes/wglext.h:228
WGL_FULL_ACCELERATION_EXT = 8231 # http://developer.download.nvidia.com/opengl/includes/wglext.h:229
WGL_SWAP_EXCHANGE_EXT = 8232 # http://developer.download.nvidia.com/opengl/includes/wglext.h:230
WGL_SWAP_COPY_EXT = 8233 # http://developer.download.nvidia.com/opengl/includes/wglext.h:231
WGL_SWAP_UNDEFINED_EXT = 8234 # http://developer.download.nvidia.com/opengl/includes/wglext.h:232
WGL_TYPE_RGBA_EXT = 8235 # http://developer.download.nvidia.com/opengl/includes/wglext.h:233
WGL_TYPE_COLORINDEX_EXT = 8236 # http://developer.download.nvidia.com/opengl/includes/wglext.h:234
# EXT_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:237)
WGL_DRAW_TO_PBUFFER_EXT = 8237 # http://developer.download.nvidia.com/opengl/includes/wglext.h:238
WGL_MAX_PBUFFER_PIXELS_EXT = 8238 # http://developer.download.nvidia.com/opengl/includes/wglext.h:239
WGL_MAX_PBUFFER_WIDTH_EXT = 8239 # http://developer.download.nvidia.com/opengl/includes/wglext.h:240
WGL_MAX_PBUFFER_HEIGHT_EXT = 8240 # http://developer.download.nvidia.com/opengl/includes/wglext.h:241
WGL_OPTIMAL_PBUFFER_WIDTH_EXT = 8241 # http://developer.download.nvidia.com/opengl/includes/wglext.h:242
WGL_OPTIMAL_PBUFFER_HEIGHT_EXT = 8242 # http://developer.download.nvidia.com/opengl/includes/wglext.h:243
WGL_PBUFFER_LARGEST_EXT = 8243 # http://developer.download.nvidia.com/opengl/includes/wglext.h:244
WGL_PBUFFER_WIDTH_EXT = 8244 # http://developer.download.nvidia.com/opengl/includes/wglext.h:245
WGL_PBUFFER_HEIGHT_EXT = 8245 # http://developer.download.nvidia.com/opengl/includes/wglext.h:246
# EXT_depth_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:249)
WGL_DEPTH_FLOAT_EXT = 8256 # http://developer.download.nvidia.com/opengl/includes/wglext.h:250
# 3DFX_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:253)
WGL_SAMPLE_BUFFERS_3DFX = 8288 # http://developer.download.nvidia.com/opengl/includes/wglext.h:254
WGL_SAMPLES_3DFX = 8289 # http://developer.download.nvidia.com/opengl/includes/wglext.h:255
# EXT_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:258)
WGL_SAMPLE_BUFFERS_EXT = 8257 # http://developer.download.nvidia.com/opengl/includes/wglext.h:259
WGL_SAMPLES_EXT = 8258 # http://developer.download.nvidia.com/opengl/includes/wglext.h:260
# I3D_digital_video_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:263)
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D = 8272 # http://developer.download.nvidia.com/opengl/includes/wglext.h:264
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D = 8273 # http://developer.download.nvidia.com/opengl/includes/wglext.h:265
WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D = 8274 # http://developer.download.nvidia.com/opengl/includes/wglext.h:266
WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D = 8275 # http://developer.download.nvidia.com/opengl/includes/wglext.h:267
# I3D_gamma (http://developer.download.nvidia.com/opengl/includes/wglext.h:270)
WGL_GAMMA_TABLE_SIZE_I3D = 8270 # http://developer.download.nvidia.com/opengl/includes/wglext.h:271
WGL_GAMMA_EXCLUDE_DESKTOP_I3D = 8271 # http://developer.download.nvidia.com/opengl/includes/wglext.h:272
# I3D_genlock (http://developer.download.nvidia.com/opengl/includes/wglext.h:275)
WGL_GENLOCK_SOURCE_MULTIVIEW_I3D = 8260 # http://developer.download.nvidia.com/opengl/includes/wglext.h:276
WGL_GENLOCK_SOURCE_EXTENAL_SYNC_I3D = 8261 # http://developer.download.nvidia.com/opengl/includes/wglext.h:277
WGL_GENLOCK_SOURCE_EXTENAL_FIELD_I3D = 8262 # http://developer.download.nvidia.com/opengl/includes/wglext.h:278
WGL_GENLOCK_SOURCE_EXTENAL_TTL_I3D = 8263 # http://developer.download.nvidia.com/opengl/includes/wglext.h:279
WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D = 8264 # http://developer.download.nvidia.com/opengl/includes/wglext.h:280
WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D = 8265 # http://developer.download.nvidia.com/opengl/includes/wglext.h:281
WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D = 8266 # http://developer.download.nvidia.com/opengl/includes/wglext.h:282
WGL_GENLOCK_SOURCE_EDGE_RISING_I3D = 8267 # http://developer.download.nvidia.com/opengl/includes/wglext.h:283
WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D = 8268 # http://developer.download.nvidia.com/opengl/includes/wglext.h:284
# I3D_image_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:287)
WGL_IMAGE_BUFFER_MIN_ACCESS_I3D = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:288
WGL_IMAGE_BUFFER_LOCK_I3D = 2 # http://developer.download.nvidia.com/opengl/includes/wglext.h:289
# I3D_swap_frame_lock (http://developer.download.nvidia.com/opengl/includes/wglext.h:292)
# NV_render_depth_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:295)
WGL_BIND_TO_TEXTURE_DEPTH_NV = 8355 # http://developer.download.nvidia.com/opengl/includes/wglext.h:296
WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV = 8356 # http://developer.download.nvidia.com/opengl/includes/wglext.h:297
WGL_DEPTH_TEXTURE_FORMAT_NV = 8357 # http://developer.download.nvidia.com/opengl/includes/wglext.h:298
WGL_TEXTURE_DEPTH_COMPONENT_NV = 8358 # http://developer.download.nvidia.com/opengl/includes/wglext.h:299
WGL_DEPTH_COMPONENT_NV = 8359 # http://developer.download.nvidia.com/opengl/includes/wglext.h:300
# NV_render_texture_rectangle (http://developer.download.nvidia.com/opengl/includes/wglext.h:303)
WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV = 8352 # http://developer.download.nvidia.com/opengl/includes/wglext.h:304
WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV = 8353 # http://developer.download.nvidia.com/opengl/includes/wglext.h:305
WGL_TEXTURE_RECTANGLE_NV = 8354 # http://developer.download.nvidia.com/opengl/includes/wglext.h:306
# ATI_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:309)
WGL_TYPE_RGBA_FLOAT_ATI = 8608 # http://developer.download.nvidia.com/opengl/includes/wglext.h:310
WGL_RGBA_FLOAT_MODE_ATI = 34848 # http://developer.download.nvidia.com/opengl/includes/wglext.h:311
WGL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI = 34869 # http://developer.download.nvidia.com/opengl/includes/wglext.h:312
# NV_float_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:315)
WGL_FLOAT_COMPONENTS_NV = 8368 # http://developer.download.nvidia.com/opengl/includes/wglext.h:316
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV = 8369 # http://developer.download.nvidia.com/opengl/includes/wglext.h:317
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV = 8370 # http://developer.download.nvidia.com/opengl/includes/wglext.h:318
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV = 8371 # http://developer.download.nvidia.com/opengl/includes/wglext.h:319
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV = 8372 # http://developer.download.nvidia.com/opengl/includes/wglext.h:320
WGL_TEXTURE_FLOAT_R_NV = 8373 # http://developer.download.nvidia.com/opengl/includes/wglext.h:321
WGL_TEXTURE_FLOAT_RG_NV = 8374 # http://developer.download.nvidia.com/opengl/includes/wglext.h:322
WGL_TEXTURE_FLOAT_RGB_NV = 8375 # http://developer.download.nvidia.com/opengl/includes/wglext.h:323
WGL_TEXTURE_FLOAT_RGBA_NV = 8376 # http://developer.download.nvidia.com/opengl/includes/wglext.h:324
# NV_swap_group (http://developer.download.nvidia.com/opengl/includes/wglext.h:327)
# NV_gpu_affinity (http://developer.download.nvidia.com/opengl/includes/wglext.h:330)
WGL_ERROR_INCOMPATIBLE_AFFINITY_MASKS_NV = 8400 # http://developer.download.nvidia.com/opengl/includes/wglext.h:331
WGL_ERROR_MISSING_AFFINITY_MASK_NV = 8401 # http://developer.download.nvidia.com/opengl/includes/wglext.h:332
# ARB_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:338)
HANDLE = POINTER(None) # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:58
HPBUFFERARB = HANDLE # http://developer.download.nvidia.com/opengl/includes/wglext.h:339
# EXT_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:341)
HPBUFFEREXT = HANDLE # http://developer.download.nvidia.com/opengl/includes/wglext.h:342
# NV_gpu_affinity (http://developer.download.nvidia.com/opengl/includes/wglext.h:345)
HGPUNV = HANDLE # http://developer.download.nvidia.com/opengl/includes/wglext.h:346
class struct__GPU_DEVICE(Structure):
__slots__ = [
'cb',
'DeviceName',
'DeviceString',
'Flags',
'rcVirtualScreen',
]
DWORD = c_ulong # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:54
CHAR = c_char # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:47
class struct_tagRECT(Structure):
__slots__ = [
'left',
'top',
'right',
'bottom',
]
LONG = c_long # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:53
struct_tagRECT._fields_ = [
('left', LONG),
('top', LONG),
('right', LONG),
('bottom', LONG),
]
RECT = struct_tagRECT # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:200
struct__GPU_DEVICE._fields_ = [
('cb', DWORD),
('DeviceName', CHAR * 32),
('DeviceString', CHAR * 128),
('Flags', DWORD),
('rcVirtualScreen', RECT),
]
GPU_DEVICE = struct__GPU_DEVICE # http://developer.download.nvidia.com/opengl/includes/wglext.h:353
PGPU_DEVICE = POINTER(struct__GPU_DEVICE) # http://developer.download.nvidia.com/opengl/includes/wglext.h:353
# ARB_buffer_region (http://developer.download.nvidia.com/opengl/includes/wglext.h:356)
WGL_ARB_buffer_region = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:357
HDC = HANDLE # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:61
UINT = c_uint # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:50
# http://developer.download.nvidia.com/opengl/includes/wglext.h:359
wglCreateBufferRegionARB = _link_function('wglCreateBufferRegionARB', HANDLE, [HDC, c_int, UINT], 'ARB_buffer_region')
VOID = None # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:45
# http://developer.download.nvidia.com/opengl/includes/wglext.h:360
wglDeleteBufferRegionARB = _link_function('wglDeleteBufferRegionARB', VOID, [HANDLE], 'ARB_buffer_region')
BOOL = c_long # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:52
# http://developer.download.nvidia.com/opengl/includes/wglext.h:361
wglSaveBufferRegionARB = _link_function('wglSaveBufferRegionARB', BOOL, [HANDLE, c_int, c_int, c_int, c_int], 'ARB_buffer_region')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:362
wglRestoreBufferRegionARB = _link_function('wglRestoreBufferRegionARB', BOOL, [HANDLE, c_int, c_int, c_int, c_int, c_int, c_int], 'ARB_buffer_region')
PFNWGLCREATEBUFFERREGIONARBPROC = CFUNCTYPE(HANDLE, HDC, c_int, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:364
PFNWGLDELETEBUFFERREGIONARBPROC = CFUNCTYPE(VOID, HANDLE) # http://developer.download.nvidia.com/opengl/includes/wglext.h:365
PFNWGLSAVEBUFFERREGIONARBPROC = CFUNCTYPE(BOOL, HANDLE, c_int, c_int, c_int, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:366
PFNWGLRESTOREBUFFERREGIONARBPROC = CFUNCTYPE(BOOL, HANDLE, c_int, c_int, c_int, c_int, c_int, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:367
# ARB_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:370)
WGL_ARB_multisample = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:371
# ARB_extensions_string (http://developer.download.nvidia.com/opengl/includes/wglext.h:374)
WGL_ARB_extensions_string = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:375
# http://developer.download.nvidia.com/opengl/includes/wglext.h:377
wglGetExtensionsStringARB = _link_function('wglGetExtensionsStringARB', c_char_p, [HDC], 'ARB_extensions_string')
PFNWGLGETEXTENSIONSSTRINGARBPROC = CFUNCTYPE(c_char_p, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:379
# ARB_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:382)
WGL_ARB_pixel_format = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:383
# http://developer.download.nvidia.com/opengl/includes/wglext.h:385
wglGetPixelFormatAttribivARB = _link_function('wglGetPixelFormatAttribivARB', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)], 'ARB_pixel_format')
FLOAT = c_float # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:55
# http://developer.download.nvidia.com/opengl/includes/wglext.h:386
wglGetPixelFormatAttribfvARB = _link_function('wglGetPixelFormatAttribfvARB', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)], 'ARB_pixel_format')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:387
wglChoosePixelFormatARB = _link_function('wglChoosePixelFormatARB', BOOL, [HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)], 'ARB_pixel_format')
PFNWGLGETPIXELFORMATATTRIBIVARBPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:389
PFNWGLGETPIXELFORMATATTRIBFVARBPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:390
PFNWGLCHOOSEPIXELFORMATARBPROC = CFUNCTYPE(BOOL, HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:391
# ARB_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:394)
WGL_ARB_make_current_read = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:395
HGLRC = HANDLE # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:60
# http://developer.download.nvidia.com/opengl/includes/wglext.h:397
wglMakeContextCurrentARB = _link_function('wglMakeContextCurrentARB', BOOL, [HDC, HDC, HGLRC], 'ARB_make_current_read')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:398
wglGetCurrentReadDCARB = _link_function('wglGetCurrentReadDCARB', HDC, [], 'ARB_make_current_read')
PFNWGLMAKECONTEXTCURRENTARBPROC = CFUNCTYPE(BOOL, HDC, HDC, HGLRC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:400
PFNWGLGETCURRENTREADDCARBPROC = CFUNCTYPE(HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:401
# ARB_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:404)
WGL_ARB_pbuffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:405
# http://developer.download.nvidia.com/opengl/includes/wglext.h:407
wglCreatePbufferARB = _link_function('wglCreatePbufferARB', HPBUFFERARB, [HDC, c_int, c_int, c_int, POINTER(c_int)], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:408
wglGetPbufferDCARB = _link_function('wglGetPbufferDCARB', HDC, [HPBUFFERARB], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:409
wglReleasePbufferDCARB = _link_function('wglReleasePbufferDCARB', c_int, [HPBUFFERARB, HDC], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:410
wglDestroyPbufferARB = _link_function('wglDestroyPbufferARB', BOOL, [HPBUFFERARB], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:411
wglQueryPbufferARB = _link_function('wglQueryPbufferARB', BOOL, [HPBUFFERARB, c_int, POINTER(c_int)], 'ARB_pbuffer')
PFNWGLCREATEPBUFFERARBPROC = CFUNCTYPE(HPBUFFERARB, HDC, c_int, c_int, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:413
PFNWGLGETPBUFFERDCARBPROC = CFUNCTYPE(HDC, HPBUFFERARB) # http://developer.download.nvidia.com/opengl/includes/wglext.h:414
PFNWGLRELEASEPBUFFERDCARBPROC = CFUNCTYPE(c_int, HPBUFFERARB, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:415
PFNWGLDESTROYPBUFFERARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB) # http://developer.download.nvidia.com/opengl/includes/wglext.h:416
PFNWGLQUERYPBUFFERARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:417
# ARB_render_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:420)
WGL_ARB_render_texture = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:421
# http://developer.download.nvidia.com/opengl/includes/wglext.h:423
wglBindTexImageARB = _link_function('wglBindTexImageARB', BOOL, [HPBUFFERARB, c_int], 'ARB_render_texture')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:424
wglReleaseTexImageARB = _link_function('wglReleaseTexImageARB', BOOL, [HPBUFFERARB, c_int], 'ARB_render_texture')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:425
wglSetPbufferAttribARB = _link_function('wglSetPbufferAttribARB', BOOL, [HPBUFFERARB, POINTER(c_int)], 'ARB_render_texture')
PFNWGLBINDTEXIMAGEARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:427
PFNWGLRELEASETEXIMAGEARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:428
PFNWGLSETPBUFFERATTRIBARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:429
# ARB_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:432)
WGL_ARB_pixel_format_float = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:433
# EXT_display_color_table (http://developer.download.nvidia.com/opengl/includes/wglext.h:436)
WGL_EXT_display_color_table = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:437
GLboolean = c_ubyte # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:18
GLushort = c_ushort # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:25
# http://developer.download.nvidia.com/opengl/includes/wglext.h:439
wglCreateDisplayColorTableEXT = _link_function('wglCreateDisplayColorTableEXT', GLboolean, [GLushort], 'EXT_display_color_table')
GLuint = c_uint # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:26
# http://developer.download.nvidia.com/opengl/includes/wglext.h:440
wglLoadDisplayColorTableEXT = _link_function('wglLoadDisplayColorTableEXT', GLboolean, [POINTER(GLushort), GLuint], 'EXT_display_color_table')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:441
wglBindDisplayColorTableEXT = _link_function('wglBindDisplayColorTableEXT', GLboolean, [GLushort], 'EXT_display_color_table')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:442
wglDestroyDisplayColorTableEXT = _link_function('wglDestroyDisplayColorTableEXT', VOID, [GLushort], 'EXT_display_color_table')
PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, GLushort) # http://developer.download.nvidia.com/opengl/includes/wglext.h:444
PFNWGLLOADDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, POINTER(GLushort), GLuint) # http://developer.download.nvidia.com/opengl/includes/wglext.h:445
PFNWGLBINDDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, GLushort) # http://developer.download.nvidia.com/opengl/includes/wglext.h:446
PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(VOID, GLushort) # http://developer.download.nvidia.com/opengl/includes/wglext.h:447
# EXT_extensions_string (http://developer.download.nvidia.com/opengl/includes/wglext.h:450)
WGL_EXT_extensions_string = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:451
# http://developer.download.nvidia.com/opengl/includes/wglext.h:453
wglGetExtensionsStringEXT = _link_function('wglGetExtensionsStringEXT', c_char_p, [], 'EXT_extensions_string')
PFNWGLGETEXTENSIONSSTRINGEXTPROC = CFUNCTYPE(c_char_p) # http://developer.download.nvidia.com/opengl/includes/wglext.h:455
# EXT_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:458)
WGL_EXT_make_current_read = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:459
# http://developer.download.nvidia.com/opengl/includes/wglext.h:461
wglMakeContextCurrentEXT = _link_function('wglMakeContextCurrentEXT', BOOL, [HDC, HDC, HGLRC], 'EXT_make_current_read')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:462
wglGetCurrentReadDCEXT = _link_function('wglGetCurrentReadDCEXT', HDC, [], 'EXT_make_current_read')
PFNWGLMAKECONTEXTCURRENTEXTPROC = CFUNCTYPE(BOOL, HDC, HDC, HGLRC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:464
PFNWGLGETCURRENTREADDCEXTPROC = CFUNCTYPE(HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:465
# EXT_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:468)
WGL_EXT_pbuffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:469
# http://developer.download.nvidia.com/opengl/includes/wglext.h:471
wglCreatePbufferEXT = _link_function('wglCreatePbufferEXT', HPBUFFEREXT, [HDC, c_int, c_int, c_int, POINTER(c_int)], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:472
wglGetPbufferDCEXT = _link_function('wglGetPbufferDCEXT', HDC, [HPBUFFEREXT], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:473
wglReleasePbufferDCEXT = _link_function('wglReleasePbufferDCEXT', c_int, [HPBUFFEREXT, HDC], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:474
wglDestroyPbufferEXT = _link_function('wglDestroyPbufferEXT', BOOL, [HPBUFFEREXT], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:475
wglQueryPbufferEXT = _link_function('wglQueryPbufferEXT', BOOL, [HPBUFFEREXT, c_int, POINTER(c_int)], 'EXT_pbuffer')
PFNWGLCREATEPBUFFEREXTPROC = CFUNCTYPE(HPBUFFEREXT, HDC, c_int, c_int, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:477
PFNWGLGETPBUFFERDCEXTPROC = CFUNCTYPE(HDC, HPBUFFEREXT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:478
PFNWGLRELEASEPBUFFERDCEXTPROC = CFUNCTYPE(c_int, HPBUFFEREXT, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:479
PFNWGLDESTROYPBUFFEREXTPROC = CFUNCTYPE(BOOL, HPBUFFEREXT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:480
PFNWGLQUERYPBUFFEREXTPROC = CFUNCTYPE(BOOL, HPBUFFEREXT, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:481
# EXT_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:484)
WGL_EXT_pixel_format = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:485
# http://developer.download.nvidia.com/opengl/includes/wglext.h:487
wglGetPixelFormatAttribivEXT = _link_function('wglGetPixelFormatAttribivEXT', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)], 'EXT_pixel_format')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:488
wglGetPixelFormatAttribfvEXT = _link_function('wglGetPixelFormatAttribfvEXT', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)], 'EXT_pixel_format')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:489
wglChoosePixelFormatEXT = _link_function('wglChoosePixelFormatEXT', BOOL, [HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)], 'EXT_pixel_format')
PFNWGLGETPIXELFORMATATTRIBIVEXTPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:491
PFNWGLGETPIXELFORMATATTRIBFVEXTPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:492
PFNWGLCHOOSEPIXELFORMATEXTPROC = CFUNCTYPE(BOOL, HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:493
# EXT_swap_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:496)
WGL_EXT_swap_control = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:497
# http://developer.download.nvidia.com/opengl/includes/wglext.h:499
wglSwapIntervalEXT = _link_function('wglSwapIntervalEXT', BOOL, [c_int], 'EXT_swap_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:500
wglGetSwapIntervalEXT = _link_function('wglGetSwapIntervalEXT', c_int, [], 'EXT_swap_control')
PFNWGLSWAPINTERVALEXTPROC = CFUNCTYPE(BOOL, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:502
PFNWGLGETSWAPINTERVALEXTPROC = CFUNCTYPE(c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:503
# EXT_depth_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:506)
WGL_EXT_depth_float = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:507
# NV_vertex_array_range (http://developer.download.nvidia.com/opengl/includes/wglext.h:510)
WGL_NV_vertex_array_range = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:511
GLsizei = c_int # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:23
GLfloat = c_float # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:27
# http://developer.download.nvidia.com/opengl/includes/wglext.h:513
wglAllocateMemoryNV = _link_function('wglAllocateMemoryNV', POINTER(c_void), [GLsizei, GLfloat, GLfloat, GLfloat], 'NV_vertex_array_range')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:514
wglFreeMemoryNV = _link_function('wglFreeMemoryNV', None, [POINTER(None)], 'NV_vertex_array_range')
PFNWGLALLOCATEMEMORYNVPROC = CFUNCTYPE(POINTER(c_void), GLsizei, GLfloat, GLfloat, GLfloat) # http://developer.download.nvidia.com/opengl/includes/wglext.h:516
PFNWGLFREEMEMORYNVPROC = CFUNCTYPE(None, POINTER(None)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:517
# 3DFX_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:520)
WGL_3DFX_multisample = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:521
# EXT_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:524)
WGL_EXT_multisample = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:525
# OML_sync_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:528)
WGL_OML_sync_control = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:529
INT64 = c_longlong # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:42
# http://developer.download.nvidia.com/opengl/includes/wglext.h:531
wglGetSyncValuesOML = _link_function('wglGetSyncValuesOML', BOOL, [HDC, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
INT32 = c_int # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:35
# http://developer.download.nvidia.com/opengl/includes/wglext.h:532
wglGetMscRateOML = _link_function('wglGetMscRateOML', BOOL, [HDC, POINTER(INT32), POINTER(INT32)], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:533
wglSwapBuffersMscOML = _link_function('wglSwapBuffersMscOML', INT64, [HDC, INT64, INT64, INT64], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:534
wglSwapLayerBuffersMscOML = _link_function('wglSwapLayerBuffersMscOML', INT64, [HDC, c_int, INT64, INT64, INT64], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:535
wglWaitForMscOML = _link_function('wglWaitForMscOML', BOOL, [HDC, INT64, INT64, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:536
wglWaitForSbcOML = _link_function('wglWaitForSbcOML', BOOL, [HDC, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
PFNWGLGETSYNCVALUESOMLPROC = CFUNCTYPE(BOOL, HDC, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:538
PFNWGLGETMSCRATEOMLPROC = CFUNCTYPE(BOOL, HDC, POINTER(INT32), POINTER(INT32)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:539
PFNWGLSWAPBUFFERSMSCOMLPROC = CFUNCTYPE(INT64, HDC, INT64, INT64, INT64) # http://developer.download.nvidia.com/opengl/includes/wglext.h:540
PFNWGLSWAPLAYERBUFFERSMSCOMLPROC = CFUNCTYPE(INT64, HDC, c_int, INT64, INT64, INT64) # http://developer.download.nvidia.com/opengl/includes/wglext.h:541
PFNWGLWAITFORMSCOMLPROC = CFUNCTYPE(BOOL, HDC, INT64, INT64, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:542
PFNWGLWAITFORSBCOMLPROC = CFUNCTYPE(BOOL, HDC, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:543
# I3D_digital_video_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:546)
WGL_I3D_digital_video_control = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:547
# http://developer.download.nvidia.com/opengl/includes/wglext.h:549
wglGetDigitalVideoParametersI3D = _link_function('wglGetDigitalVideoParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_digital_video_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:550
wglSetDigitalVideoParametersI3D = _link_function('wglSetDigitalVideoParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_digital_video_control')
PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:552
PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:553
# I3D_gamma (http://developer.download.nvidia.com/opengl/includes/wglext.h:556)
WGL_I3D_gamma = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:557
# http://developer.download.nvidia.com/opengl/includes/wglext.h:559
wglGetGammaTableParametersI3D = _link_function('wglGetGammaTableParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_gamma')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:560
wglSetGammaTableParametersI3D = _link_function('wglSetGammaTableParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_gamma')
USHORT = c_ushort # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:49
# http://developer.download.nvidia.com/opengl/includes/wglext.h:561
wglGetGammaTableI3D = _link_function('wglGetGammaTableI3D', BOOL, [HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)], 'I3D_gamma')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:562
wglSetGammaTableI3D = _link_function('wglSetGammaTableI3D', BOOL, [HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)], 'I3D_gamma')
PFNWGLGETGAMMATABLEPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:564
PFNWGLSETGAMMATABLEPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:565
PFNWGLGETGAMMATABLEI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:566
PFNWGLSETGAMMATABLEI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:567
# I3D_genlock (http://developer.download.nvidia.com/opengl/includes/wglext.h:570)
WGL_I3D_genlock = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:571
# http://developer.download.nvidia.com/opengl/includes/wglext.h:573
wglEnableGenlockI3D = _link_function('wglEnableGenlockI3D', BOOL, [HDC], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:574
wglDisableGenlockI3D = _link_function('wglDisableGenlockI3D', BOOL, [HDC], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:575
wglIsEnabledGenlockI3D = _link_function('wglIsEnabledGenlockI3D', BOOL, [HDC, POINTER(BOOL)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:576
wglGenlockSourceI3D = _link_function('wglGenlockSourceI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:577
wglGetGenlockSourceI3D = _link_function('wglGetGenlockSourceI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:578
wglGenlockSourceEdgeI3D = _link_function('wglGenlockSourceEdgeI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:579
wglGetGenlockSourceEdgeI3D = _link_function('wglGetGenlockSourceEdgeI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:580
wglGenlockSampleRateI3D = _link_function('wglGenlockSampleRateI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:581
wglGetGenlockSampleRateI3D = _link_function('wglGetGenlockSampleRateI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:582
wglGenlockSourceDelayI3D = _link_function('wglGenlockSourceDelayI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:583
wglGetGenlockSourceDelayI3D = _link_function('wglGetGenlockSourceDelayI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:584
wglQueryGenlockMaxSourceDelayI3D = _link_function('wglQueryGenlockMaxSourceDelayI3D', BOOL, [HDC, POINTER(UINT), POINTER(UINT)], 'I3D_genlock')
PFNWGLENABLEGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:586
PFNWGLDISABLEGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:587
PFNWGLISENABLEDGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(BOOL)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:588
PFNWGLGENLOCKSOURCEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:589
PFNWGLGETGENLOCKSOURCEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:590
PFNWGLGENLOCKSOURCEEDGEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:591
PFNWGLGETGENLOCKSOURCEEDGEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:592
PFNWGLGENLOCKSAMPLERATEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:593
PFNWGLGETGENLOCKSAMPLERATEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:594
PFNWGLGENLOCKSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:595
PFNWGLGETGENLOCKSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:596
PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT), POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:597
# I3D_image_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:600)
WGL_I3D_image_buffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:601
LPVOID = POINTER(None) # C:\cygwin\home\alex\projects\pyglet\tools\wgl.h:45
# http://developer.download.nvidia.com/opengl/includes/wglext.h:603
wglCreateImageBufferI3D = _link_function('wglCreateImageBufferI3D', LPVOID, [HDC, DWORD, UINT], 'I3D_image_buffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:604
wglDestroyImageBufferI3D = _link_function('wglDestroyImageBufferI3D', BOOL, [HDC, LPVOID], 'I3D_image_buffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:605
wglAssociateImageBufferEventsI3D = _link_function('wglAssociateImageBufferEventsI3D', BOOL, [HDC, POINTER(HANDLE), POINTER(LPVOID), POINTER(DWORD), UINT], 'I3D_image_buffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:606
wglReleaseImageBufferEventsI3D = _link_function('wglReleaseImageBufferEventsI3D', BOOL, [HDC, POINTER(LPVOID), UINT], 'I3D_image_buffer')
PFNWGLCREATEIMAGEBUFFERI3DPROC = CFUNCTYPE(LPVOID, HDC, DWORD, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:608
PFNWGLDESTROYIMAGEBUFFERI3DPROC = CFUNCTYPE(BOOL, HDC, LPVOID) # http://developer.download.nvidia.com/opengl/includes/wglext.h:609
PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(HANDLE), POINTER(LPVOID), POINTER(DWORD), UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:610
PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(LPVOID), UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:611
# I3D_swap_frame_lock (http://developer.download.nvidia.com/opengl/includes/wglext.h:614)
WGL_I3D_swap_frame_lock = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:615
# http://developer.download.nvidia.com/opengl/includes/wglext.h:617
wglEnableFrameLockI3D = _link_function('wglEnableFrameLockI3D', BOOL, [], 'I3D_swap_frame_lock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:618
wglDisableFrameLockI3D = _link_function('wglDisableFrameLockI3D', BOOL, [], 'I3D_swap_frame_lock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:619
wglIsEnabledFrameLockI3D = _link_function('wglIsEnabledFrameLockI3D', BOOL, [POINTER(BOOL)], 'I3D_swap_frame_lock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:620
wglQueryFrameLockMasterI3D = _link_function('wglQueryFrameLockMasterI3D', BOOL, [POINTER(BOOL)], 'I3D_swap_frame_lock')
PFNWGLENABLEFRAMELOCKI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:622
PFNWGLDISABLEFRAMELOCKI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:623
PFNWGLISENABLEDFRAMELOCKI3DPROC = CFUNCTYPE(BOOL, POINTER(BOOL)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:624
PFNWGLQUERYFRAMELOCKMASTERI3DPROC = CFUNCTYPE(BOOL, POINTER(BOOL)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:625
# I3D_swap_frame_usage (http://developer.download.nvidia.com/opengl/includes/wglext.h:628)
WGL_I3D_swap_frame_usage = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:629
# http://developer.download.nvidia.com/opengl/includes/wglext.h:631
wglGetFrameUsageI3D = _link_function('wglGetFrameUsageI3D', BOOL, [POINTER(c_float)], 'I3D_swap_frame_usage')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:632
wglBeginFrameTrackingI3D = _link_function('wglBeginFrameTrackingI3D', BOOL, [], 'I3D_swap_frame_usage')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:633
wglEndFrameTrackingI3D = _link_function('wglEndFrameTrackingI3D', BOOL, [], 'I3D_swap_frame_usage')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:634
wglQueryFrameTrackingI3D = _link_function('wglQueryFrameTrackingI3D', BOOL, [POINTER(DWORD), POINTER(DWORD), POINTER(c_float)], 'I3D_swap_frame_usage')
PFNWGLGETFRAMEUSAGEI3DPROC = CFUNCTYPE(BOOL, POINTER(c_float)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:636
PFNWGLBEGINFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:637
PFNWGLENDFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:638
PFNWGLQUERYFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL, POINTER(DWORD), POINTER(DWORD), POINTER(c_float)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:639
# ATI_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:642)
WGL_ATI_pixel_format_float = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:643
# NV_render_depth_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:646)
WGL_NV_render_depth_texture = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:647
# NV_render_texture_rectangle (http://developer.download.nvidia.com/opengl/includes/wglext.h:650)
WGL_NV_render_texture_rectangle = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:651
# NV_float_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:654)
WGL_NV_float_buffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:655
# NV_swap_group (http://developer.download.nvidia.com/opengl/includes/wglext.h:658)
WGL_NV_swap_group = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:659
# http://developer.download.nvidia.com/opengl/includes/wglext.h:661
wglJoinSwapGroupNV = _link_function('wglJoinSwapGroupNV', BOOL, [HDC, GLuint], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:662
wglBindSwapBarrierNV = _link_function('wglBindSwapBarrierNV', BOOL, [GLuint, GLuint], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:663
wglQuerySwapGroupNV = _link_function('wglQuerySwapGroupNV', BOOL, [HDC, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:664
wglQueryMaxSwapGroupsNV = _link_function('wglQueryMaxSwapGroupsNV', BOOL, [HDC, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:665
wglQueryFrameCountNV = _link_function('wglQueryFrameCountNV', BOOL, [HDC, POINTER(GLuint)], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:666
wglResetFrameCountNV = _link_function('wglResetFrameCountNV', BOOL, [HDC], 'NV_swap_group')
PFNWGLJOINSWAPGROUPNVPROC = CFUNCTYPE(BOOL, HDC, GLuint) # http://developer.download.nvidia.com/opengl/includes/wglext.h:668
PFNWGLBINDSWAPBARRIERNVPROC = CFUNCTYPE(BOOL, GLuint, GLuint) # http://developer.download.nvidia.com/opengl/includes/wglext.h:669
PFNWGLQUERYSWAPGROUPNVPROC = CFUNCTYPE(BOOL, HDC, POINTER(GLuint), POINTER(GLuint)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:670
PFNWGLQUERYMAXSWAPGROUPSNVPROC = CFUNCTYPE(BOOL, HDC, POINTER(GLuint), POINTER(GLuint)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:671
PFNWGLQUERYFRAMECOUNTNVPROC = CFUNCTYPE(BOOL, HDC, POINTER(GLuint)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:672
PFNWGLRESETFRAMECOUNTNVPROC = CFUNCTYPE(BOOL, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:673
# NV_gpu_affinity (http://developer.download.nvidia.com/opengl/includes/wglext.h:676)
WGL_NV_gpu_affinity = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:677
# http://developer.download.nvidia.com/opengl/includes/wglext.h:679
wglEnumGpusNV = _link_function('wglEnumGpusNV', BOOL, [UINT, POINTER(HGPUNV)], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:680
wglEnumGpuDevicesNV = _link_function('wglEnumGpuDevicesNV', BOOL, [HGPUNV, UINT, PGPU_DEVICE], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:681
wglCreateAffinityDCNV = _link_function('wglCreateAffinityDCNV', HDC, [POINTER(HGPUNV)], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:682
wglEnumGpusFromAffinityDCNV = _link_function('wglEnumGpusFromAffinityDCNV', BOOL, [HDC, UINT, POINTER(HGPUNV)], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:683
wglDeleteDCNV = _link_function('wglDeleteDCNV', BOOL, [HDC], 'NV_gpu_affinity')
__all__ = ['WIN32_LEAN_AND_MEAN', 'WGL_WGLEXT_VERSION',
'WGL_FRONT_COLOR_BUFFER_BIT_ARB', 'WGL_BACK_COLOR_BUFFER_BIT_ARB',
'WGL_DEPTH_BUFFER_BIT_ARB', 'WGL_STENCIL_BUFFER_BIT_ARB',
'WGL_SAMPLE_BUFFERS_ARB', 'WGL_SAMPLES_ARB', 'WGL_NUMBER_PIXEL_FORMATS_ARB',
'WGL_DRAW_TO_WINDOW_ARB', 'WGL_DRAW_TO_BITMAP_ARB', 'WGL_ACCELERATION_ARB',
'WGL_NEED_PALETTE_ARB', 'WGL_NEED_SYSTEM_PALETTE_ARB',
'WGL_SWAP_LAYER_BUFFERS_ARB', 'WGL_SWAP_METHOD_ARB',
'WGL_NUMBER_OVERLAYS_ARB', 'WGL_NUMBER_UNDERLAYS_ARB', 'WGL_TRANSPARENT_ARB',
'WGL_TRANSPARENT_RED_VALUE_ARB', 'WGL_TRANSPARENT_GREEN_VALUE_ARB',
'WGL_TRANSPARENT_BLUE_VALUE_ARB', 'WGL_TRANSPARENT_ALPHA_VALUE_ARB',
'WGL_TRANSPARENT_INDEX_VALUE_ARB', 'WGL_SHARE_DEPTH_ARB',
'WGL_SHARE_STENCIL_ARB', 'WGL_SHARE_ACCUM_ARB', 'WGL_SUPPORT_GDI_ARB',
'WGL_SUPPORT_OPENGL_ARB', 'WGL_DOUBLE_BUFFER_ARB', 'WGL_STEREO_ARB',
'WGL_PIXEL_TYPE_ARB', 'WGL_COLOR_BITS_ARB', 'WGL_RED_BITS_ARB',
'WGL_RED_SHIFT_ARB', 'WGL_GREEN_BITS_ARB', 'WGL_GREEN_SHIFT_ARB',
'WGL_BLUE_BITS_ARB', 'WGL_BLUE_SHIFT_ARB', 'WGL_ALPHA_BITS_ARB',
'WGL_ALPHA_SHIFT_ARB', 'WGL_ACCUM_BITS_ARB', 'WGL_ACCUM_RED_BITS_ARB',
'WGL_ACCUM_GREEN_BITS_ARB', 'WGL_ACCUM_BLUE_BITS_ARB',
'WGL_ACCUM_ALPHA_BITS_ARB', 'WGL_DEPTH_BITS_ARB', 'WGL_STENCIL_BITS_ARB',
'WGL_AUX_BUFFERS_ARB', 'WGL_NO_ACCELERATION_ARB',
'WGL_GENERIC_ACCELERATION_ARB', 'WGL_FULL_ACCELERATION_ARB',
'WGL_SWAP_EXCHANGE_ARB', 'WGL_SWAP_COPY_ARB', 'WGL_SWAP_UNDEFINED_ARB',
'WGL_TYPE_RGBA_ARB', 'WGL_TYPE_COLORINDEX_ARB',
'ERROR_INVALID_PIXEL_TYPE_ARB', 'ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB',
'WGL_DRAW_TO_PBUFFER_ARB', 'WGL_MAX_PBUFFER_PIXELS_ARB',
'WGL_MAX_PBUFFER_WIDTH_ARB', 'WGL_MAX_PBUFFER_HEIGHT_ARB',
'WGL_PBUFFER_LARGEST_ARB', 'WGL_PBUFFER_WIDTH_ARB', 'WGL_PBUFFER_HEIGHT_ARB',
'WGL_PBUFFER_LOST_ARB', 'WGL_BIND_TO_TEXTURE_RGB_ARB',
'WGL_BIND_TO_TEXTURE_RGBA_ARB', 'WGL_TEXTURE_FORMAT_ARB',
'WGL_TEXTURE_TARGET_ARB', 'WGL_MIPMAP_TEXTURE_ARB', 'WGL_TEXTURE_RGB_ARB',
'WGL_TEXTURE_RGBA_ARB', 'WGL_NO_TEXTURE_ARB', 'WGL_TEXTURE_CUBE_MAP_ARB',
'WGL_TEXTURE_1D_ARB', 'WGL_TEXTURE_2D_ARB', 'WGL_MIPMAP_LEVEL_ARB',
'WGL_CUBE_MAP_FACE_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB', 'WGL_FRONT_LEFT_ARB',
'WGL_FRONT_RIGHT_ARB', 'WGL_BACK_LEFT_ARB', 'WGL_BACK_RIGHT_ARB',
'WGL_AUX0_ARB', 'WGL_AUX1_ARB', 'WGL_AUX2_ARB', 'WGL_AUX3_ARB',
'WGL_AUX4_ARB', 'WGL_AUX5_ARB', 'WGL_AUX6_ARB', 'WGL_AUX7_ARB',
'WGL_AUX8_ARB', 'WGL_AUX9_ARB', 'WGL_TYPE_RGBA_FLOAT_ARB',
'ERROR_INVALID_PIXEL_TYPE_EXT', 'WGL_NUMBER_PIXEL_FORMATS_EXT',
'WGL_DRAW_TO_WINDOW_EXT', 'WGL_DRAW_TO_BITMAP_EXT', 'WGL_ACCELERATION_EXT',
'WGL_NEED_PALETTE_EXT', 'WGL_NEED_SYSTEM_PALETTE_EXT',
'WGL_SWAP_LAYER_BUFFERS_EXT', 'WGL_SWAP_METHOD_EXT',
'WGL_NUMBER_OVERLAYS_EXT', 'WGL_NUMBER_UNDERLAYS_EXT', 'WGL_TRANSPARENT_EXT',
'WGL_TRANSPARENT_VALUE_EXT', 'WGL_SHARE_DEPTH_EXT', 'WGL_SHARE_STENCIL_EXT',
'WGL_SHARE_ACCUM_EXT', 'WGL_SUPPORT_GDI_EXT', 'WGL_SUPPORT_OPENGL_EXT',
'WGL_DOUBLE_BUFFER_EXT', 'WGL_STEREO_EXT', 'WGL_PIXEL_TYPE_EXT',
'WGL_COLOR_BITS_EXT', 'WGL_RED_BITS_EXT', 'WGL_RED_SHIFT_EXT',
'WGL_GREEN_BITS_EXT', 'WGL_GREEN_SHIFT_EXT', 'WGL_BLUE_BITS_EXT',
'WGL_BLUE_SHIFT_EXT', 'WGL_ALPHA_BITS_EXT', 'WGL_ALPHA_SHIFT_EXT',
'WGL_ACCUM_BITS_EXT', 'WGL_ACCUM_RED_BITS_EXT', 'WGL_ACCUM_GREEN_BITS_EXT',
'WGL_ACCUM_BLUE_BITS_EXT', 'WGL_ACCUM_ALPHA_BITS_EXT', 'WGL_DEPTH_BITS_EXT',
'WGL_STENCIL_BITS_EXT', 'WGL_AUX_BUFFERS_EXT', 'WGL_NO_ACCELERATION_EXT',
'WGL_GENERIC_ACCELERATION_EXT', 'WGL_FULL_ACCELERATION_EXT',
'WGL_SWAP_EXCHANGE_EXT', 'WGL_SWAP_COPY_EXT', 'WGL_SWAP_UNDEFINED_EXT',
'WGL_TYPE_RGBA_EXT', 'WGL_TYPE_COLORINDEX_EXT', 'WGL_DRAW_TO_PBUFFER_EXT',
'WGL_MAX_PBUFFER_PIXELS_EXT', 'WGL_MAX_PBUFFER_WIDTH_EXT',
'WGL_MAX_PBUFFER_HEIGHT_EXT', 'WGL_OPTIMAL_PBUFFER_WIDTH_EXT',
'WGL_OPTIMAL_PBUFFER_HEIGHT_EXT', 'WGL_PBUFFER_LARGEST_EXT',
'WGL_PBUFFER_WIDTH_EXT', 'WGL_PBUFFER_HEIGHT_EXT', 'WGL_DEPTH_FLOAT_EXT',
'WGL_SAMPLE_BUFFERS_3DFX', 'WGL_SAMPLES_3DFX', 'WGL_SAMPLE_BUFFERS_EXT',
'WGL_SAMPLES_EXT', 'WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D',
'WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D',
'WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D',
'WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D', 'WGL_GAMMA_TABLE_SIZE_I3D',
'WGL_GAMMA_EXCLUDE_DESKTOP_I3D', 'WGL_GENLOCK_SOURCE_MULTIVIEW_I3D',
'WGL_GENLOCK_SOURCE_EXTENAL_SYNC_I3D', 'WGL_GENLOCK_SOURCE_EXTENAL_FIELD_I3D',
'WGL_GENLOCK_SOURCE_EXTENAL_TTL_I3D', 'WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D',
'WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D', 'WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D',
'WGL_GENLOCK_SOURCE_EDGE_RISING_I3D', 'WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D',
'WGL_IMAGE_BUFFER_MIN_ACCESS_I3D', 'WGL_IMAGE_BUFFER_LOCK_I3D',
'WGL_BIND_TO_TEXTURE_DEPTH_NV', 'WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV',
'WGL_DEPTH_TEXTURE_FORMAT_NV', 'WGL_TEXTURE_DEPTH_COMPONENT_NV',
'WGL_DEPTH_COMPONENT_NV', 'WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV', 'WGL_TEXTURE_RECTANGLE_NV',
'WGL_TYPE_RGBA_FLOAT_ATI', 'WGL_RGBA_FLOAT_MODE_ATI',
'WGL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI', 'WGL_FLOAT_COMPONENTS_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV', 'WGL_TEXTURE_FLOAT_R_NV',
'WGL_TEXTURE_FLOAT_RG_NV', 'WGL_TEXTURE_FLOAT_RGB_NV',
'WGL_TEXTURE_FLOAT_RGBA_NV', 'WGL_ERROR_INCOMPATIBLE_AFFINITY_MASKS_NV',
'WGL_ERROR_MISSING_AFFINITY_MASK_NV', 'HPBUFFERARB', 'HPBUFFEREXT', 'HGPUNV',
'GPU_DEVICE', 'PGPU_DEVICE', 'WGL_ARB_buffer_region',
'wglCreateBufferRegionARB', 'wglDeleteBufferRegionARB',
'wglSaveBufferRegionARB', 'wglRestoreBufferRegionARB',
'PFNWGLCREATEBUFFERREGIONARBPROC', 'PFNWGLDELETEBUFFERREGIONARBPROC',
'PFNWGLSAVEBUFFERREGIONARBPROC', 'PFNWGLRESTOREBUFFERREGIONARBPROC',
'WGL_ARB_multisample', 'WGL_ARB_extensions_string',
'wglGetExtensionsStringARB', 'PFNWGLGETEXTENSIONSSTRINGARBPROC',
'WGL_ARB_pixel_format', 'wglGetPixelFormatAttribivARB',
'wglGetPixelFormatAttribfvARB', 'wglChoosePixelFormatARB',
'PFNWGLGETPIXELFORMATATTRIBIVARBPROC', 'PFNWGLGETPIXELFORMATATTRIBFVARBPROC',
'PFNWGLCHOOSEPIXELFORMATARBPROC', 'WGL_ARB_make_current_read',
'wglMakeContextCurrentARB', 'wglGetCurrentReadDCARB',
'PFNWGLMAKECONTEXTCURRENTARBPROC', 'PFNWGLGETCURRENTREADDCARBPROC',
'WGL_ARB_pbuffer', 'wglCreatePbufferARB', 'wglGetPbufferDCARB',
'wglReleasePbufferDCARB', 'wglDestroyPbufferARB', 'wglQueryPbufferARB',
'PFNWGLCREATEPBUFFERARBPROC', 'PFNWGLGETPBUFFERDCARBPROC',
'PFNWGLRELEASEPBUFFERDCARBPROC', 'PFNWGLDESTROYPBUFFERARBPROC',
'PFNWGLQUERYPBUFFERARBPROC', 'WGL_ARB_render_texture', 'wglBindTexImageARB',
'wglReleaseTexImageARB', 'wglSetPbufferAttribARB',
'PFNWGLBINDTEXIMAGEARBPROC', 'PFNWGLRELEASETEXIMAGEARBPROC',
'PFNWGLSETPBUFFERATTRIBARBPROC', 'WGL_ARB_pixel_format_float',
'WGL_EXT_display_color_table', 'wglCreateDisplayColorTableEXT',
'wglLoadDisplayColorTableEXT', 'wglBindDisplayColorTableEXT',
'wglDestroyDisplayColorTableEXT', 'PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC',
'PFNWGLLOADDISPLAYCOLORTABLEEXTPROC', 'PFNWGLBINDDISPLAYCOLORTABLEEXTPROC',
'PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC', 'WGL_EXT_extensions_string',
'wglGetExtensionsStringEXT', 'PFNWGLGETEXTENSIONSSTRINGEXTPROC',
'WGL_EXT_make_current_read', 'wglMakeContextCurrentEXT',
'wglGetCurrentReadDCEXT', 'PFNWGLMAKECONTEXTCURRENTEXTPROC',
'PFNWGLGETCURRENTREADDCEXTPROC', 'WGL_EXT_pbuffer', 'wglCreatePbufferEXT',
'wglGetPbufferDCEXT', 'wglReleasePbufferDCEXT', 'wglDestroyPbufferEXT',
'wglQueryPbufferEXT', 'PFNWGLCREATEPBUFFEREXTPROC',
'PFNWGLGETPBUFFERDCEXTPROC', 'PFNWGLRELEASEPBUFFERDCEXTPROC',
'PFNWGLDESTROYPBUFFEREXTPROC', 'PFNWGLQUERYPBUFFEREXTPROC',
'WGL_EXT_pixel_format', 'wglGetPixelFormatAttribivEXT',
'wglGetPixelFormatAttribfvEXT', 'wglChoosePixelFormatEXT',
'PFNWGLGETPIXELFORMATATTRIBIVEXTPROC', 'PFNWGLGETPIXELFORMATATTRIBFVEXTPROC',
'PFNWGLCHOOSEPIXELFORMATEXTPROC', 'WGL_EXT_swap_control',
'wglSwapIntervalEXT', 'wglGetSwapIntervalEXT', 'PFNWGLSWAPINTERVALEXTPROC',
'PFNWGLGETSWAPINTERVALEXTPROC', 'WGL_EXT_depth_float',
'WGL_NV_vertex_array_range', 'wglAllocateMemoryNV', 'wglFreeMemoryNV',
'PFNWGLALLOCATEMEMORYNVPROC', 'PFNWGLFREEMEMORYNVPROC',
'WGL_3DFX_multisample', 'WGL_EXT_multisample', 'WGL_OML_sync_control',
'wglGetSyncValuesOML', 'wglGetMscRateOML', 'wglSwapBuffersMscOML',
'wglSwapLayerBuffersMscOML', 'wglWaitForMscOML', 'wglWaitForSbcOML',
'PFNWGLGETSYNCVALUESOMLPROC', 'PFNWGLGETMSCRATEOMLPROC',
'PFNWGLSWAPBUFFERSMSCOMLPROC', 'PFNWGLSWAPLAYERBUFFERSMSCOMLPROC',
'PFNWGLWAITFORMSCOMLPROC', 'PFNWGLWAITFORSBCOMLPROC',
'WGL_I3D_digital_video_control', 'wglGetDigitalVideoParametersI3D',
'wglSetDigitalVideoParametersI3D', 'PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC',
'PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC', 'WGL_I3D_gamma',
'wglGetGammaTableParametersI3D', 'wglSetGammaTableParametersI3D',
'wglGetGammaTableI3D', 'wglSetGammaTableI3D',
'PFNWGLGETGAMMATABLEPARAMETERSI3DPROC',
'PFNWGLSETGAMMATABLEPARAMETERSI3DPROC', 'PFNWGLGETGAMMATABLEI3DPROC',
'PFNWGLSETGAMMATABLEI3DPROC', 'WGL_I3D_genlock', 'wglEnableGenlockI3D',
'wglDisableGenlockI3D', 'wglIsEnabledGenlockI3D', 'wglGenlockSourceI3D',
'wglGetGenlockSourceI3D', 'wglGenlockSourceEdgeI3D',
'wglGetGenlockSourceEdgeI3D', 'wglGenlockSampleRateI3D',
'wglGetGenlockSampleRateI3D', 'wglGenlockSourceDelayI3D',
'wglGetGenlockSourceDelayI3D', 'wglQueryGenlockMaxSourceDelayI3D',
'PFNWGLENABLEGENLOCKI3DPROC', 'PFNWGLDISABLEGENLOCKI3DPROC',
'PFNWGLISENABLEDGENLOCKI3DPROC', 'PFNWGLGENLOCKSOURCEI3DPROC',
'PFNWGLGETGENLOCKSOURCEI3DPROC', 'PFNWGLGENLOCKSOURCEEDGEI3DPROC',
'PFNWGLGETGENLOCKSOURCEEDGEI3DPROC', 'PFNWGLGENLOCKSAMPLERATEI3DPROC',
'PFNWGLGETGENLOCKSAMPLERATEI3DPROC', 'PFNWGLGENLOCKSOURCEDELAYI3DPROC',
'PFNWGLGETGENLOCKSOURCEDELAYI3DPROC',
'PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC', 'WGL_I3D_image_buffer',
'wglCreateImageBufferI3D', 'wglDestroyImageBufferI3D',
'wglAssociateImageBufferEventsI3D', 'wglReleaseImageBufferEventsI3D',
'PFNWGLCREATEIMAGEBUFFERI3DPROC', 'PFNWGLDESTROYIMAGEBUFFERI3DPROC',
'PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC',
'PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC', 'WGL_I3D_swap_frame_lock',
'wglEnableFrameLockI3D', 'wglDisableFrameLockI3D', 'wglIsEnabledFrameLockI3D',
'wglQueryFrameLockMasterI3D', 'PFNWGLENABLEFRAMELOCKI3DPROC',
'PFNWGLDISABLEFRAMELOCKI3DPROC', 'PFNWGLISENABLEDFRAMELOCKI3DPROC',
'PFNWGLQUERYFRAMELOCKMASTERI3DPROC', 'WGL_I3D_swap_frame_usage',
'wglGetFrameUsageI3D', 'wglBeginFrameTrackingI3D', 'wglEndFrameTrackingI3D',
'wglQueryFrameTrackingI3D', 'PFNWGLGETFRAMEUSAGEI3DPROC',
'PFNWGLBEGINFRAMETRACKINGI3DPROC', 'PFNWGLENDFRAMETRACKINGI3DPROC',
'PFNWGLQUERYFRAMETRACKINGI3DPROC', 'WGL_ATI_pixel_format_float',
'WGL_NV_render_depth_texture', 'WGL_NV_render_texture_rectangle',
'WGL_NV_float_buffer', 'WGL_NV_swap_group', 'wglJoinSwapGroupNV',
'wglBindSwapBarrierNV', 'wglQuerySwapGroupNV', 'wglQueryMaxSwapGroupsNV',
'wglQueryFrameCountNV', 'wglResetFrameCountNV', 'PFNWGLJOINSWAPGROUPNVPROC',
'PFNWGLBINDSWAPBARRIERNVPROC', 'PFNWGLQUERYSWAPGROUPNVPROC',
'PFNWGLQUERYMAXSWAPGROUPSNVPROC', 'PFNWGLQUERYFRAMECOUNTNVPROC',
'PFNWGLRESETFRAMECOUNTNVPROC', 'WGL_NV_gpu_affinity', 'wglEnumGpusNV',
'wglEnumGpuDevicesNV', 'wglCreateAffinityDCNV', 'wglEnumGpusFromAffinityDCNV',
'wglDeleteDCNV']
# END GENERATED CONTENT (do not edit above this line)
|
py | 7df84dccc8801cffaf495995d614334b121de9a2 | # Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from adapt.context import ContextManager
from adapt.engine import IntentDeterminationEngine
from adapt.intent import IntentBuilder
__author__ = "seanfitz"
import unittest
class ContextManagerIntegrationTest(unittest.TestCase):
def setUp(self):
self.context_manager = ContextManager()
self.engine = IntentDeterminationEngine()
def testBasicContextualFollowup(self):
intent1 = IntentBuilder("TimeQueryIntent")\
.require("TimeQuery")\
.require("Location")\
.build()
intent2 = IntentBuilder("WeatherQueryIntent")\
.require("WeatherKeyword")\
.require("Location")\
.build()
self.engine.register_intent_parser(intent1)
self.engine.register_intent_parser(intent2)
self.engine.register_entity("what time is it", "TimeQuery")
self.engine.register_entity("seattle", "Location")
self.engine.register_entity("miami", "Location")
self.engine.register_entity("weather", "WeatherKeyword")
utterance1 = "what time is it in seattle"
intent = next(self.engine.determine_intent(utterance1, include_tags=True, context_manager=self.context_manager))
assert intent
assert intent['intent_type'] == 'TimeQueryIntent'
assert '__tags__' in intent
for tag in intent['__tags__']:
context_entity = tag.get('entities')[0]
self.context_manager.inject_context(context_entity)
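        # At this point the tagged entities from the first utterance (including the
        # Location "seattle") have been injected into the context manager, so the
        # follow-up utterance below can resolve its Location requirement from context.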
utterance2 = "what's the weather like?"
intent = next(self.engine.determine_intent(utterance2, context_manager=self.context_manager))
assert intent
assert intent['intent_type'] == 'WeatherQueryIntent'
def testContextOnlyUsedOnce(self):
intent_parser = IntentBuilder("DummyIntent")\
.require("Foo")\
.optionally("Foo", "Foo2")\
.build()
context_entity = {'confidence': 1.0, 'data': [('foo', 'Foo')], 'match': 'foo', 'key': 'foo'}
self.context_manager.inject_context(context_entity)
self.engine.register_intent_parser(intent_parser)
self.engine.register_entity("foo", "Foo")
self.engine.register_entity("fop", "Foo")
intent = next(self.engine.determine_intent("foo", include_tags=True, context_manager=self.context_manager))
assert intent
assert intent['intent_type'] == "DummyIntent"
assert not (intent.get("Foo") and intent.get("Foo2"))
|
py | 7df84de08742730913a548085dfcef16c3b40d6f | import textwrap
import validation
mywidth = 70
myindent = ' '*4
wrapper = textwrap.TextWrapper(initial_indent=myindent, subsequent_indent=myindent, width=mywidth)
rsthdr = ['*','#','=','-','^','~','%']
def get_type_string(indict):
outstr = ''
if indict['type'] == 'number':
outstr = 'Float'
if 'unit' in indict.keys() and indict['unit'].lower() != 'none':
outstr+=', '+indict['unit']
elif 'units' in indict.keys() and indict['units'].lower() != 'none':
outstr+=', '+indict['units']
elif indict['type'] == 'integer':
outstr = 'Integer'
elif indict['type'] == 'boolean':
outstr = 'Boolean'
elif indict['type'] == 'string':
if 'enum' in indict.keys():
outstr = 'String from, '+str(indict['enum'])
else:
outstr = 'String'
elif indict['type'] == 'array':
outstr = 'Array of '
if indict['items']['type'] == 'number':
outstr += 'Floats'
if 'unit' in indict['items'].keys() and indict['items']['unit'].lower() != 'none':
outstr+=', '+indict['items']['unit']
elif 'units' in indict['items'].keys() and indict['items']['units'].lower() != 'none':
outstr+=', '+indict['items']['units']
elif indict['items']['type'] == 'integer':
outstr += 'Integers'
elif indict['items']['type'] == 'string':
outstr += 'Strings'
elif indict['items']['type'] == 'boolean':
outstr += 'Booleans'
return outstr
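# For illustration, a hypothetical schema entry (not taken from any real schema) such as
# {'type': 'array', 'items': {'type': 'number', 'unit': 'm'}} would be rendered by
# get_type_string as 'Array of Floats, m'.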
def get_description_string(indict):
outstr = ''
if 'description' in indict.keys():
outstr += wrapper.fill(indict['description'])
if 'default' in indict.keys():
outstr += '\n\n'+myindent + '*Default* = '+str(indict['default'])
if 'minimum' in indict.keys():
outstr += '\n\n'+myindent + '*Minimum* = '+str(indict['minimum'])
elif (indict['type'] == 'array' and 'minimum' in indict['items'].keys()):
outstr += '\n\n'+myindent + '*Minimum* = '+str(indict['items']['minimum'])
if 'maximum' in indict.keys():
outstr += myindent + '*Maximum* = '+str(indict['maximum'])+'\n'
elif (indict['type'] == 'array' and 'maximum' in indict['items'].keys()):
outstr += '\n\n'+myindent + '*Maximum* = '+str(indict['items']['maximum'])
outstr += '\n'
return outstr
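# Similarly, a hypothetical entry such as
# {'type': 'number', 'description': 'Rotor diameter', 'unit': 'm', 'default': 120.0, 'minimum': 0.0}
# would make get_description_string emit the wrapped, indented description followed by
# indented '*Default* = 120.0' and '*Minimum* = 0.0' lines.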
class Schema2RST(object):
def __init__(self, fname):
self.fname = fname
self.fout = fname.replace('.yaml','.rst')
self.yaml = validation.load_yaml( fname )
self.f = None
def write_rst(self):
self.f = open(self.fout, 'w')
self.write_header()
self.write_loop(self.yaml['properties'], 0, self.fname.replace('yaml',''))
self.f.close()
def write_header(self):
self.f.write('*'*30+'\n')
self.f.write(self.fname+'\n')
self.f.write('*'*30+'\n')
if 'description' in self.yaml.keys():
self.f.write(self.yaml['description']+'\n')
def write_loop(self, rv, idepth, name, desc=None):
self.f.write('\n')
self.f.write('\n')
self.f.write(name+'\n')
print(idepth)
if idepth > 0: self.f.write(rsthdr[idepth-1]*40+'\n')
self.f.write('\n')
        if desc is not None: self.f.write(desc+'\n')
for k in rv.keys():
print(k)
try:
if 'type' in rv[k]:
if rv[k]['type'] == 'object' and 'properties' in rv[k].keys():
                        k_desc = None if 'description' not in rv[k] else rv[k]['description']
self.write_loop(rv[k]['properties'], idepth+1, k, k_desc)
elif rv[k]['type'].lower() in ['number','integer','string','boolean']:
self.f.write(':code:`'+k+'` : '+get_type_string( rv[k] )+'\n')
self.f.write( get_description_string( rv[k] )+'\n')
elif (rv[k]['type'].lower() == 'array' and
rv[k]['items']['type'] == 'object' and
'properties' in rv[k]['items'].keys() ):
                        k_desc = None if 'description' not in rv[k]['items'] else rv[k]['items']['description']
self.write_loop(rv[k]['items']['properties'], idepth+1, k, k_desc)
elif (rv[k]['type'].lower() == 'array' and
rv[k]['items']['type'] in ['number','integer','string','boolean']):
self.f.write(':code:`'+k+'` : '+get_type_string( rv[k] )+'\n')
self.f.write( get_description_string( rv[k] )+'\n')
            except Exception:
                print('Error reading', k, 'in', name, 'at depth', idepth)
continue
if __name__ == '__main__':
for ifile in ['geometry_schema.yaml','modeling_schema.yaml','analysis_schema.yaml']:
myobj = Schema2RST(ifile)
myobj.write_rst()
|
py | 7df84e3f4f4dd7272ddd5d3b582eb7351123f7f1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The nmap wrapper is used to easily and efficiently perform nmap scans with python-nmap.
It wraps the relevant scan functions of nmap's `PortScanner`. The scan results are stored locally to
make them accessible later if needed. The class also provides a way to scan asynchronously for
network devices at a specific address or subnet.
"""
import os
import threading
import typing
import warnings
try:
import nmap
_NMAP_IMPORTED = True
except (ImportError, ModuleNotFoundError):
_NMAP_IMPORTED = False
warnings.warn("python-nmap is not installed. Without this package you may not find all "
"ethernet devices in your local network. When installing python-nmap, do not "
"forget to install the nmap executable as well, if it is not installed, yet. And "
"make sure it is also included in the PATH environmental variable.")
from ..device import LANDevice
__all__ = ["NMAPWrapper"]
####################################################################################################
class NMAPWrapper: # pragma: no cover
"""Wrapper class for `nmap.PortScanner`. It class manages network scans via nmap and converts
the results in `Device`s.
Args:
        notify_parent_done: An optional function object which is called after a scan is
                            performed. The function needs to accept one argument of type bool. This
                            argument will be True if the scan succeeded and False if not.
**kwargs:
- nmap_search_path: One or multiple paths where to search for the nmap executable.
"""
def __init__(self,
notify_parent_done: typing.Optional[typing.Callable[[bool], typing.Any]] = None,
**kwargs):
super().__init__()
self._nmap = None
if _NMAP_IMPORTED:
nmap_kwargs = {}
if "nmap_search_path" in kwargs:
nmap_kwargs["nmap_search_path"] = kwargs["nmap_search_path"]
try:
self._nmap = nmap.PortScanner(**nmap_kwargs)
except nmap.PortScannerError:
# An error is raised, if the nmap-executable was not found
warnings.warn("Could not create a nmap.PortScanner instance. Maybe nmap is not "
"installed on your machine or it is not specified in PATH. If nmap "
"is already installed try specifying its path with the "
"'nmap_search_path'-parameter.")
self._nmap_results = []
self._nmap_thread = None
self._notify_parent_done = notify_parent_done
@property
def valid(self) -> bool:
"""Returns True, if the nmap.PortScanner could be instantiated"""
return self._nmap is not None
@property
def raw_devices(self) -> typing.Sequence[typing.Dict]:
"""The raw search results as they are returned by a scan with nmap. The results of all
previous scans are included.
"""
return tuple(self._nmap_results)
@property
def devices(self) -> typing.Sequence[LANDevice]:
"""The results of all previous scans with namp. The raw results are converted into `Device`-
objects.
"""
raw_devices = self.raw_devices
devices = {}
for raw_device in raw_devices:
try:
# Extract ip addresses and mac address
addresses = raw_device["addresses"]
mac_address = addresses["mac"]
except KeyError:
continue
ip_addresses = []
# Append all IPv4 and IPv6 addresses
for key in ["ipv4", "ipv6"]:
try:
ip_addresses.append(addresses[key])
except KeyError:
continue
# If there is already a device for this mac address, just update the device's addresses
if mac_address in devices:
address_aliases = [ip_address for ip_address in ip_addresses
if ip_address not in devices[mac_address].all_addresses]
# Append unknown addresses to aliases
if len(address_aliases) > 0:
                    devices[mac_address].address_aliases = [*devices[mac_address].address_aliases,
                                                            *address_aliases]
else:
dev = LANDevice()
dev.mac_address = mac_address
dev.address = ip_addresses[0]
if len(ip_addresses) > 1:
# If multiple addresses were found, add the others as aliases
dev.address_aliases = ip_addresses[1:]
devices[mac_address] = dev
return tuple(devices.values())
def clear_devices(self) -> None:
"""Deletes all previous scan results."""
self._nmap_results.clear()
def scan(self, hosts: typing.Union[str, typing.Iterable[str]]) -> bool:
"""Performs a network scan with nmap (synchronously).
Args:
hosts: One host as string or multiple hosts as iterable of strings. Multiple hosts can
also be written as single string with a space as separator. A host can use one of
the following formats to scan a single host:
- ip address (e.g. 192.168.1.10)
- hostname (e.g. localhost)
- domain (e.g. mydevice.company.com)
Or to scan a whole subnet of a local network:
- ip subnet (e.g. 192.168.1.0/24 for a 24bit netmask)
"""
return self._scan(hosts, None)
def scan_async(self, hosts: typing.Union[str, typing.Iterable[str]],
on_done: typing.Optional[typing.Callable[[bool], None]] = None) -> bool:
"""Performs a network scan with nmap asynchronously.
Args:
hosts: One host as string or multiple hosts as iterable of strings. Multiple hosts can
also be written as single string with a space as separator. A host can use one of
the following formats to scan a single host:
- ip address (e.g. 192.168.1.10)
- hostname (e.g. localhost)
- domain (e.g. mydevice.company.com)
Or to scan a whole subnet of a local network:
- ip subnet (e.g. 192.168.1.0/24 for a 24bit netmask)
on_done: An optional function object which is called after this scan is performed. The
function needs to accept one argument of type bool. This argument will be True,
if the scan succeeded and False, if not.
Returns:
bool: True, if the asynchronous scan was started. False, if a scan is already running.
"""
if self.is_scan_alive():
return False
self._nmap_thread = threading.Thread(target=self._scan, args=(hosts, on_done))
self._nmap_thread.start()
return True
def is_scan_alive(self) -> bool:
"""Checks if an asynchronous scan is still running.
Returns:
bool: True, if an asynchronous scan is running. Otherwise, false.
"""
if self._nmap_thread is None:
return False
elif self._nmap_thread.is_alive():
return True
else:
# If the thread is not alive, but not None, set it to None, because it is not needed
# anymore.
self._nmap_thread = None
return False
def wait_for_scan(self, timeout: typing.Optional[float] = None) -> bool:
"""If an asynchronous scan is running, this function waits until the scan is finished.
Args:
timeout: A floating point number specifying a timeout (maximum time to wait) in seconds.
If timeout is None, this function will block until the scan is completed.
Returns:
bool: True, if the scan is completed or not running at all. False, if the timeout
happened.
"""
# If no scan thread is alive, there is nothing to wait for
if not self.is_scan_alive():
return True
self._nmap_thread.join(timeout=timeout)
        return not self.is_scan_alive()
def _scan(self, hosts: typing.Union[str, typing.Iterable[str]],
on_done: typing.Optional[typing.Callable[[bool], None]]) -> bool:
"""Performs a network scan with nmap (synchronously).
Args:
hosts: One host as string or multiple hosts as iterable of strings. Multiple hosts can
also be written as single string with a space as separator. A host can use one of
the following formats to scan a single host:
- ip address (e.g. 192.168.1.10)
- hostname (e.g. localhost)
- domain (e.g. mydevice.company.com)
Or to scan a whole subnet of a local network:
- ip subnet (e.g. 192.168.1.0/24 for a 24bit netmask)
on_done: An optional function object which is called after this scan is performed. The
function needs to accept one argument of type bool. This argument will be True,
if the scan succeeded and False, if not.
"""
if self._nmap is None:
# The nmap-PortScanner could not be instantiated. So, either nmap or python-nmap are not
# installed.
warnings.warn("Could not perform a network scan with nmap. Either \"nmap\" is not "
"installed on your system or \"python-nmap\" is missing in your python "
"environment. To use the nmap features, make sure both are installed.")
return False
result = False
if not isinstance(hosts, str):
# nmap expects a single string as host-argument, multiple hosts are separated by spaces
hosts = " ".join(hosts)
try:
exception = None
for arguments in ["-sA -F --min-parallelism 1024 --privileged",
"-sT -F --min-parallelism 1024"]:
try:
# Try to perform a TCP-ACK scan, which seems to be the fastest one, but it
                    # requires admin privileges on linux. If the user has the required privileges it
# should work.
self._nmap.scan(hosts, arguments=arguments)
scan_info = self._nmap.scaninfo()
if "error" in scan_info:
                        # The scan terminated correctly, but stderr contained some output
raise nmap.PortScannerError(os.linesep.join(scan_info["error"]))
break # Success
except nmap.PortScannerError as exc:
# If an error occurs, this could be due to missing admin privileges. So the next
# element from the arguments is tried which needs less privileges.
exception = exc
else:
if exception is not None:
raise exception
# Append all new scan results
for host in self._nmap.all_hosts():
device = self._nmap[host]
# If the same device is already known, there is no need to add it again
if device not in self._nmap_results:
self._nmap_results.append(device)
result = True
except (UnicodeDecodeError, nmap.PortScannerError):
# Some error messages containing special characters cannot be decoded on windows. That
# is why the UnicodeDecodeError is caught here
pass # result = False
finally:
# Call the functions, because the scan is finished
if self._notify_parent_done is not None:
# If a callable was provided in the constructor, call it now
self._notify_parent_done(result)
if on_done is not None:
# Additionally, a callable can be passed in this function. If such a function was
# passed, call it now
on_done(result)
return result
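# A minimal usage sketch, not part of the original module. The subnet and the 60-second
# timeout below are illustrative assumptions; nmap and python-nmap must be installed for
# the scan to do anything useful.
if __name__ == "__main__":  # pragma: no cover
    def _report(success: bool) -> None:
        # Called by the wrapper once the asynchronous scan has finished.
        print("scan finished:", "ok" if success else "failed")
    scanner = NMAPWrapper(notify_parent_done=_report)
    if scanner.valid and scanner.scan_async("192.168.1.0/24"):
        scanner.wait_for_scan(timeout=60)
        for device in scanner.devices:
            print(device.mac_address, device.address, device.address_aliases)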
|
py | 7df84ee122dd9ba9101b33460c828b5c6c12386d | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..stats import ActivationCount
def test_ActivationCount_inputs():
input_map = dict(
in_files=dict(
mandatory=True,
),
threshold=dict(
mandatory=True,
),
)
inputs = ActivationCount.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ActivationCount_outputs():
output_map = dict(
acm_neg=dict(
extensions=None,
),
acm_pos=dict(
extensions=None,
),
out_file=dict(
extensions=None,
),
)
outputs = ActivationCount.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
py | 7df84f88538e297dec961774c40ef1a7fc90583b | r"""
.. _sec-autoregressive:
Autoregressive model change
====================================================================================================
Description
----------------------------------------------------------------------------------------------------
Let :math:`0<t_1<t_2<\dots<n` be unknown change points indexes.
Consider the following piecewise autoregressive model
.. math::
y_t = z_t' \delta_j + \varepsilon_t, \quad \forall t=t_j,\dots,t_{j+1}-1
where :math:`j>1` is the segment number, :math:`z_t=[y_{t-1}, y_{t-2},\dots,y_{t-p}]` is the lag vector, and :math:`p>0` is the order of the process.
The least-squares estimates of the break dates are obtained by minimizing the sum of squared
residuals :cite:`ar-Bai2000`.
Formally, the associated cost function on an interval :math:`I` is
.. math:: c(y_{I}) = \min_{\delta\in\mathbb{R}^p} \sum_{t\in I} \|y_t - \delta' z_t \|_2^2
Usage
----------------------------------------------------------------------------------------------------
Start with the usual imports and create a signal with piecewise linear trends.
.. code-block:: python
from itertools import cycle
import numpy as np
import matplotlib.pylab as plt
import ruptures as rpt
# creation of data
n = 2000
    n_bkps, sigma = 4, 0.5 # number of change points, noise standard deviation
bkps = [400, 1000, 1300, 1800, n]
f1 = np.array([0.075, 0.1])
f2 = np.array([0.1, 0.125])
freqs = np.zeros((n, 2))
for sub, val in zip(np.split(freqs, bkps[:-1]), cycle([f1, f2])):
sub += val
tt = np.arange(n)
    signal = np.sum([np.sin(2*np.pi*tt*f) for f in freqs.T], axis=0)
signal += np.random.normal(scale=sigma, size=signal.shape)
# display signal
rpt.show.display(signal, bkps, figsize=(10, 6))
plt.show()
Then create a :class:`CostAR` instance and print the cost of the sub-signal
:code:`signal[50:150]`.
The autoregressive order can be specified through the keyword ``'order'``.
.. code-block:: python
c = rpt.costs.CostAR(order=10).fit(signal)
print(c.error(50, 150))
You can also compute the sum of costs for a given list of change points.
.. code-block:: python
print(c.sum_of_costs(bkps))
print(c.sum_of_costs([10, 100, 200, 250, n]))
In order to use this cost class in a change point detection algorithm (inheriting from
:class:`BaseEstimator`), either pass a :class:`CostAR` instance (through the argument
``'custom_cost'``) or set :code:`model="ar"`.
Additional parameters can be passed to the cost instance through the keyword ``'params'``.
.. code-block:: python
c = rpt.costs.CostAR(order=10); algo = rpt.Dynp(custom_cost=c)
# is equivalent to
algo = rpt.Dynp(model="ar", params={"order": 10})
Code explanation
----------------------------------------------------------------------------------------------------
.. autoclass:: ruptures.costs.CostAR
:members:
:special-members: __init__
.. rubric:: References
.. bibliography:: ../biblio.bib
:style: alpha
:cited:
:labelprefix: AR
:keyprefix: ar-
"""
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.linalg import lstsq
from ruptures.base import BaseCost
from ruptures.costs import NotEnoughPoints
class CostAR(BaseCost):
r"""
Least-squares estimate for changes in autoregressive coefficients.
"""
model = "ar"
def __init__(self, order=4):
self.signal = None
self.covar = None
self.min_size = max(5, order + 1)
self.order = order
def fit(self, signal):
"""Set parameters of the instance.
The signal must be 1D.
Args:
signal (array): 1d signal. Shape (n_samples, 1) or (n_samples,).
Returns:
self
"""
if signal.ndim == 1:
self.signal = signal.reshape(-1, 1)
else:
self.signal = signal
# lagged covariates
n_samples, _ = self.signal.shape
strides = (self.signal.itemsize, self.signal.itemsize)
shape = (n_samples - self.order, self.order)
lagged = as_strided(self.signal, shape=shape, strides=strides)
# pad the first columns
lagged_after_padding = np.pad(lagged, ((self.order, 0), (0, 0)), mode="edge")
# add intercept
self.covar = np.c_[lagged_after_padding, np.ones(n_samples)]
# pad signal on the edges
self.signal[: self.order] = self.signal[self.order]
return self
def error(self, start, end):
"""Return the approximation cost on the segment [start:end].
Args:
start (int): start of the segment
end (int): end of the segment
Returns:
float: segment cost
Raises:
NotEnoughPoints: when the segment is too short (less than ``'min_size'`` samples).
"""
if end - start < self.min_size:
raise NotEnoughPoints
y, X = self.signal[start:end], self.covar[start:end]
_, residual, _, _ = lstsq(X, y, rcond=None)
return residual.sum()
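# A minimal sketch, not part of the original module: fit the cost on a toy AR(1) signal and
# print two segment costs. The AR coefficient, noise scale and segment bounds are illustrative.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    toy = np.zeros(500)
    for t in range(1, 500):
        toy[t] = 0.8 * toy[t - 1] + rng.normal(scale=0.5)
    toy_cost = CostAR(order=2).fit(toy)
    print(toy_cost.error(0, 250), toy_cost.error(250, 500))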
|
py | 7df84fda35c6f2f40215965947662350ceb2fa4a | from .keyboards import TestKeyboardLayout |
py | 7df84fffd4f623d5b10510a685d39cb12e8f68ba | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pickle
import sys
import os
if len(sys.argv) < 4:
exit("USAGE: python tools/extract_detectron_weights.py \
weights_file out_dir feat_name [feat_name]")
wgts_file = sys.argv[1]
out_dir = sys.argv[2]
with open(wgts_file, 'rb') as f:
wgts = pickle.load(f, encoding='latin1')['blobs']
for i in range(3, len(sys.argv)):
feat_name = sys.argv[i]
wgt = wgts[feat_name]
out_file = os.path.join(out_dir, feat_name + ".pkl")
with open(out_file, 'wb') as w:
pickle.dump(wgt, w)
|
py | 7df8506e83a078e3ba181a3bce665e9bff6ab771 | from django.db import models
from django.conf import settings
# Create your models here.
User = settings.AUTH_USER_MODEL
class Unit(models.Model):
name = models.CharField(max_length=100)
price = models.DecimalField(max_digits=10, decimal_places=2)
occupied = models.BooleanField(default=False)
image = models.ImageField(
upload_to='musonge/', blank=True, null=True)
date_added = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(
User, related_name='units', on_delete=models.CASCADE)
def __str__(self):
return self.name
|
py | 7df85189bbd76fd9aaf49fb32e668044d3707faa | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
# Recognize the captcha characters in an image.
from PIL import Image
import hashlib
import time
import os
import math
class VectorCompare:
def magnitude(self, concordance):
total = 0
for word,count in concordance.items():
total += count ** 2
return math.sqrt(total)
def relation(self, concordance1, concordance2):
relevance = 0
topvalue = 0
for word, count in concordance1.items():
if word in concordance2:#concordance2.has_key(word):
topvalue += count * concordance2[word]
return topvalue / (self.magnitude(concordance1) * self.magnitude(concordance2))
def buildvector(im):
d1 = {}
count = 0
for i in im.getdata():
d1[count] = i
count += 1
return d1
v = VectorCompare()
iconset = ['0','1','2','3','4','5','6','7','8','9','0','a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
imageset = []
for letter in iconset:
# print(letter)
# print(os.listdir('./iconset/%s/' % (letter)))
for img in os.listdir('./iconset/%s/'%(letter)):
temp = []
if img != 'Thumbs.db' and img != '.DS_Store':
temp.append(buildvector(Image.open('./iconset/%s/%s' %(letter, img))))
imageset.append({letter:temp})
#print(imageset)
im = Image.open("captcha.gif")
# Convert the image to 8-bit pixel mode
im = im.convert("P")
im2 = Image.new("P", im.size, 255)
temp = {}
# Print the color histogram
#print(im.histogram())
for x in range(im.size[1]):
for y in range(im.size[0]):
pix = im.getpixel((y,x))
temp[pix] = pix
if pix == 220 or pix == 227:
im2.putpixel((y,x), 0)
inletter = False
foundletter = False
start = 0
end = 0
letters = []
for y in range(im2.size[0]):
for x in range(im2.size[1]):
pix = im2.getpixel((y,x))
if pix != 255:
inletter = True
if foundletter == False and inletter == True:
foundletter = True
start = y
if foundletter == True and inletter == False:
foundletter = False
end = y
letters.append((start, end))
inletter = False
count = 0
for letter in letters:
m = hashlib.md5()
im3 = im2.crop((letter[0], 0, letter[1], im2.size[1]))
guess = []
for image in imageset:
#print (image)
for x,y in image.items():
if len(y) != 0:
guess.append((v.relation(y[0], buildvector(im3)), x))
guess.sort(reverse=True)
print(guess[0])
#print ("", guess[0])
count += 1
'''
his = im.histogram()
values = {}
for i in range(256):
values[i] = his[i]
for j,k in sorted(values.items(), key=lambda x:x[1], reverse=True)[:10]:
print(j, k)
im2.show()
'''
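# Illustrative check, not part of the original script: VectorCompare.relation computes the
# cosine similarity between two pixel dictionaries, so identical vectors score 1.0. The toy
# dictionaries below are made up for demonstration.
_toy_a = {0: 1, 1: 2, 2: 2}
_toy_b = {0: 1, 1: 2, 2: 2}
print(v.relation(_toy_a, _toy_b))  # expected: 1.0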
|
py | 7df851ae5480fe9459e7b0f4a5c70a27a4c746d4 | """
=================================
Plot topographies for MEG sensors
=================================
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
print __doc__
import pylab as pl
from mne import fiff
from mne.layouts import Layout
from mne.viz import plot_topo
from mne.datasets import sample
data_path = sample.data_path('.')
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading
evoked = fiff.read_evoked(fname, setno=0, baseline=(None, 0))
layout = Layout('Vectorview-all')
###############################################################################
# Show topography
plot_topo(evoked, layout)
title = 'MNE sample data (condition : %s)' % evoked.comment
pl.figtext(0.03, 0.93, title, color='w', fontsize=18)
pl.show()
|
py | 7df853da80769e4c42e7faf885370168174fc98e | """
WSGI config for BerlinCheckers project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BerlinCheckers.settings')
application = get_wsgi_application()
|
py | 7df854869479c970f9348a9ced2413360fb8da1b | """Optimize a design according to preferences."""
import scipy.optimize
from physprog import classfunctions
from physprog import objective
from physprog import plots
def optimize(model, preferences, plot=False):
"""Optimize the given problem to specified preferences."""
constraints = get_constraints(model, preferences)
aggregate = objective.build_objective(model, preferences)
initial_performance = model.evaluate()
print('Optimizing design starting at value: {:.2f}\n{}'
''.format(aggregate(model.design), initial_performance))
scipy.optimize.minimize(
aggregate,
model.design,
constraints=constraints,
options={'disp': False})
final_performance = model.evaluate()
print('Optimal design input: {}\nParams: {}\nValue: {}'
''.format(model.design, final_performance, aggregate(
model.design)))
if plot:
plots.plot_optimization_results(
preferences,
initial_performance,
final_performance)
def get_constraints(problem, preferences):
"""
Extract constraints given a set of class functions.
Scipy needs a sequence of dicts describing inequalities that
will be forced to be >=0.
"""
print('Building constraints')
constraints = []
for funcname, func in preferences.items():
if isinstance(func, classfunctions.MustBeAbove):
# param >= cutoff implies param-cutoff>=0
def constraint(x, prob, funcname):
"""Evaluate a greater-than constraint for optimizer."""
prob.design = x
return getattr(prob, funcname)() - func.bounds.cutoff # pylint: disable=cell-var-from-loop
elif isinstance(func, classfunctions.MustBeBelow):
# param <= cutoff implies cutoff - param>=0
def constraint(x, prob, funcname):
"""Evaluate a less-than constraint for optimizer."""
prob.design = x
return func.bounds.cutoff - getattr(prob, funcname)() # pylint: disable=cell-var-from-loop
else:
constraint = None
if constraint is not None:
constraints.append({
'type': 'ineq',
'fun': constraint,
'args': (problem, funcname)
})
return constraints
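# A minimal sketch, not part of the original module, of the inequality-constraint format that
# scipy.optimize.minimize expects: each dict carries 'type', 'fun' and optional 'args', and
# 'ineq' functions must evaluate to >= 0 at feasible points. The toy objective and bound are
# illustrative only.
if __name__ == "__main__":
    demo_constraints = [{'type': 'ineq', 'fun': lambda x: x[0] - 1.0}]  # enforces x[0] >= 1
    demo_result = scipy.optimize.minimize(lambda x: (x[0] - 3.0) ** 2, [5.0],
                                          constraints=demo_constraints)
    print(demo_result.x)  # expected to end up near [3.0]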
|
py | 7df85519b446ff6e7cdb9a4ec37949ca4d871b67 | #
# InternetRadio E2
#
# Coded by Dr.Best (c) 2012
# Support: www.dreambox-tools.info
# E-Mail: [email protected]
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# If you want to use or modify the code or parts of it,
# you have to keep MY license and inform me about the modifications by mail.
#
from Plugins.Plugin import PluginDescriptor
from Tools.HardwareInfo import HardwareInfo
from Components.config import config, ConfigSubsection, ConfigSelection, ConfigDirectory, ConfigYesNo, Config, ConfigText
from InternetRadioScreen import InternetRadioScreen
config.plugins.internetradio = ConfigSubsection()
config.plugins.internetradio.showinextensions = ConfigYesNo(default = True)
config.plugins.internetradio.dirname = ConfigDirectory(default = "/media/hdd/streamripper/")
config.plugins.internetradio.riptosinglefile = ConfigYesNo(default = False)
config.plugins.internetradio.createdirforeachstream = ConfigYesNo(default = True)
config.plugins.internetradio.addsequenceoutputfile = ConfigYesNo(default = False)
config.plugins.internetradio.filter = ConfigText(default=_("Countries"))
if HardwareInfo().get_device_name() == "dm500hd":
config.plugins.internetradio.visualization = ConfigSelection(choices = [("2", _("On")), ("3", _("Off"))], default = "2")
else:
config.plugins.internetradio.visualization = ConfigSelection(choices = [("0", _("Screen and OLED")), ("1", _("OLED only")), ("2", _("Screen only")), ("3", _("Off"))], default = "2")
config.plugins.internetradio.googlecover = ConfigYesNo(default = False)
config.plugins.internetradio.startupname = ConfigText(default = "")
config.plugins.internetradio.startuptext = ConfigText(default = "")
config.plugins.internetradio.fullscreenautoactivation = ConfigSelection(choices = [("30", _("30 seconds")), ("60", _("1 minutes")), ("180", _("3 minutes")), ("-1", _("Off"))], default = "30")
config.plugins.internetradio.fullscreenlayout = ConfigSelection(choices = [("0", _("Visualization and Text")), ("1", _("Text only")), ("2", _("Blank"))], default = "0")
def sessionstart(reason, **kwargs):
if reason == 0 and "session" in kwargs:
try:
from Plugins.Extensions.WebInterface.WebChilds.Toplevel import addExternalChild
from Plugins.Extensions.WebInterface.WebChilds.Screenpage import ScreenPage
from twisted.python import util
from twisted.web import static
if hasattr(static.File, 'render_GET'):
class File(static.File):
def render_POST(self, request):
return self.render_GET(request)
else:
File = static.File
session = kwargs["session"]
root = File(util.sibpath(__file__, "web-data"))
root.putChild("web", ScreenPage(session, util.sibpath(__file__, "web"), True))
addExternalChild( ("internetradio", root, "Internet-Radio", "1", True) )
except ImportError:
pass # pah!
def main(session,**kwargs):
session.open(InternetRadioScreen)
def Plugins(**kwargs):
list = [PluginDescriptor(name="Internet-Radio", description=_("listen to internet-radio"), where = [PluginDescriptor.WHERE_PLUGINMENU], icon="plugin.png", fnc=main)] # always show in plugin menu
if config.plugins.internetradio.showinextensions.value:
list.append (PluginDescriptor(name="Internet-Radio", description=_("listen to internet-radio"), where = [PluginDescriptor.WHERE_EXTENSIONSMENU], fnc=main))
list.append (PluginDescriptor(where=PluginDescriptor.WHERE_SESSIONSTART, fnc=sessionstart, needsRestart=False))
return list
|
py | 7df85581725a34ba1fecaad271be8725c87c116a | # cmd.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from contextlib import contextmanager
import io
import logging
import os
import signal
from subprocess import (
call,
Popen,
PIPE
)
import subprocess
import sys
import threading
from textwrap import dedent
from git.compat import (
defenc,
force_bytes,
safe_decode,
is_posix,
is_win,
)
from git.exc import CommandError
from git.util import is_cygwin_git, cygpath, expand_path, remove_password_if_present
from .exc import (
GitCommandError,
GitCommandNotFound
)
from .util import (
LazyMixin,
stream_copy,
)
# typing ---------------------------------------------------------------------------
from typing import (Any, AnyStr, BinaryIO, Callable, Dict, IO, List, Mapping,
Sequence, TYPE_CHECKING, TextIO, Tuple, Union, cast, overload)
from git.types import PathLike, Literal, TBD
if TYPE_CHECKING:
from git.repo.base import Repo
from git.diff import DiffIndex
# ---------------------------------------------------------------------------------
execute_kwargs = {'istream', 'with_extended_output',
'with_exceptions', 'as_process', 'stdout_as_string',
'output_stream', 'with_stdout', 'kill_after_timeout',
'universal_newlines', 'shell', 'env', 'max_chunk_size'}
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
__all__ = ('Git',)
# ==============================================================================
## @name Utilities
# ------------------------------------------------------------------------------
# Documentation
## @{
def handle_process_output(process: subprocess.Popen,
stdout_handler: Union[None,
Callable[[AnyStr], None],
Callable[[List[AnyStr]], None],
Callable[[bytes, 'Repo', 'DiffIndex'], None]],
stderr_handler: Union[None,
Callable[[AnyStr], None],
Callable[[List[AnyStr]], None]],
finalizer: Union[None,
Callable[[subprocess.Popen], None]] = None,
decode_streams: bool = True) -> None:
"""Registers for notifications to learn that process output is ready to read, and dispatches lines to
the respective line handlers.
This function returns once the finalizer returns
:return: result of finalizer
:param process: subprocess.Popen instance
:param stdout_handler: f(stdout_line_string), or None
:param stderr_handler: f(stderr_line_string), or None
:param finalizer: f(proc) - wait for proc to finish
:param decode_streams:
Assume stdout/stderr streams are binary and decode them before pushing \
their contents to handlers.
        Set it to False if `universal_newlines == True` (then streams are in text-mode)
or if decoding must happen later (i.e. for Diffs).
"""
# Use 2 "pump" threads and wait for both to finish.
def pump_stream(cmdline: str, name: str, stream: Union[BinaryIO, TextIO], is_decode: bool,
handler: Union[None, Callable[[Union[bytes, str]], None]]) -> None:
try:
for line in stream:
if handler:
if is_decode:
assert isinstance(line, bytes)
line_str = line.decode(defenc)
handler(line_str)
else:
handler(line)
except Exception as ex:
log.error("Pumping %r of cmd(%s) failed due to: %r", name, remove_password_if_present(cmdline), ex)
raise CommandError(['<%s-pump>' % name] + remove_password_if_present(cmdline), ex) from ex
finally:
stream.close()
cmdline = getattr(process, 'args', '') # PY3+ only
if not isinstance(cmdline, (tuple, list)):
cmdline = cmdline.split()
pumps = []
if process.stdout:
pumps.append(('stdout', process.stdout, stdout_handler))
if process.stderr:
pumps.append(('stderr', process.stderr, stderr_handler))
threads = []
for name, stream, handler in pumps:
t = threading.Thread(target=pump_stream,
args=(cmdline, name, stream, decode_streams, handler))
t.daemon = True
t.start()
threads.append(t)
## FIXME: Why Join?? Will block if `stdin` needs feeding...
#
for t in threads:
t.join()
if finalizer:
return finalizer(process)
else:
return None
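# Hypothetical usage sketch, not part of the original module: stream a long-running git
# command and forward each output line to a handler. The command, URL and handlers are
# illustrative; see the parameter descriptions above for the exact contract.
#
#   proc = Git().execute(['git', 'clone', '--progress', 'https://example.com/repo.git', 'repo'],
#                        as_process=True)
#   handle_process_output(proc, stdout_handler=print, stderr_handler=print,
#                         finalizer=lambda p: p.wait())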
def dashify(string: str) -> str:
return string.replace('_', '-')
def slots_to_dict(self, exclude: Sequence[str] = ()) -> Dict[str, Any]:
return {s: getattr(self, s) for s in self.__slots__ if s not in exclude}
def dict_to_slots_and__excluded_are_none(self, d: Mapping[str, Any], excluded: Sequence[str] = ()) -> None:
for k, v in d.items():
setattr(self, k, v)
for k in excluded:
setattr(self, k, None)
## -- End Utilities -- @}
# value of Windows process creation flag taken from MSDN
CREATE_NO_WINDOW = 0x08000000
## CREATE_NEW_PROCESS_GROUP is needed to allow killing it afterwards,
# see https://docs.python.org/3/library/subprocess.html#subprocess.Popen.send_signal
PROC_CREATIONFLAGS = (CREATE_NO_WINDOW | subprocess.CREATE_NEW_PROCESS_GROUP # type: ignore[attr-defined]
if is_win else 0)
class Git(LazyMixin):
"""
The Git class manages communication with the Git binary.
It provides a convenient interface to calling the Git binary, such as in::
g = Git( git_dir )
g.init() # calls 'git init' program
rval = g.ls_files() # calls 'git ls-files' program
``Debugging``
Set the GIT_PYTHON_TRACE environment variable print each invocation
of the command to stdout.
Set its value to 'full' to see details about the returned values.
"""
__slots__ = ("_working_dir", "cat_file_all", "cat_file_header", "_version_info",
"_git_options", "_persistent_git_options", "_environment")
_excluded_ = ('cat_file_all', 'cat_file_header', '_version_info')
def __getstate__(self) -> Dict[str, Any]:
return slots_to_dict(self, exclude=self._excluded_)
def __setstate__(self, d) -> None:
dict_to_slots_and__excluded_are_none(self, d, excluded=self._excluded_)
# CONFIGURATION
git_exec_name = "git" # default that should work on linux and windows
# Enables debugging of GitPython's git commands
GIT_PYTHON_TRACE = os.environ.get("GIT_PYTHON_TRACE", False)
# If True, a shell will be used when executing git commands.
# This should only be desirable on Windows, see https://github.com/gitpython-developers/GitPython/pull/126
# and check `git/test_repo.py:TestRepo.test_untracked_files()` TC for an example where it is required.
# Override this value using `Git.USE_SHELL = True`
USE_SHELL = False
# Provide the full path to the git executable. Otherwise it assumes git is in the path
_git_exec_env_var = "GIT_PYTHON_GIT_EXECUTABLE"
_refresh_env_var = "GIT_PYTHON_REFRESH"
GIT_PYTHON_GIT_EXECUTABLE = None
# note that the git executable is actually found during the refresh step in
# the top level __init__
@classmethod
def refresh(cls, path: Union[None, PathLike] = None) -> bool:
"""This gets called by the refresh function (see the top level
__init__).
"""
# discern which path to refresh with
if path is not None:
new_git = os.path.expanduser(path)
new_git = os.path.abspath(new_git)
else:
new_git = os.environ.get(cls._git_exec_env_var, cls.git_exec_name)
# keep track of the old and new git executable path
old_git = cls.GIT_PYTHON_GIT_EXECUTABLE
cls.GIT_PYTHON_GIT_EXECUTABLE = new_git
# test if the new git executable path is valid
# - a GitCommandNotFound error is spawned by ourselves
# - a PermissionError is spawned if the git executable provided
# cannot be executed for whatever reason
has_git = False
try:
cls().version()
has_git = True
except (GitCommandNotFound, PermissionError):
pass
# warn or raise exception if test failed
if not has_git:
err = dedent("""\
Bad git executable.
The git executable must be specified in one of the following ways:
- be included in your $PATH
- be set via $%s
- explicitly set via git.refresh()
""") % cls._git_exec_env_var
# revert to whatever the old_git was
cls.GIT_PYTHON_GIT_EXECUTABLE = old_git
if old_git is None:
# on the first refresh (when GIT_PYTHON_GIT_EXECUTABLE is
# None) we only are quiet, warn, or error depending on the
# GIT_PYTHON_REFRESH value
# determine what the user wants to happen during the initial
# refresh we expect GIT_PYTHON_REFRESH to either be unset or
# be one of the following values:
# 0|q|quiet|s|silence
# 1|w|warn|warning
# 2|r|raise|e|error
mode = os.environ.get(cls._refresh_env_var, "raise").lower()
quiet = ["quiet", "q", "silence", "s", "none", "n", "0"]
warn = ["warn", "w", "warning", "1"]
error = ["error", "e", "raise", "r", "2"]
if mode in quiet:
pass
elif mode in warn or mode in error:
err = dedent("""\
%s
All git commands will error until this is rectified.
This initial warning can be silenced or aggravated in the future by setting the
$%s environment variable. Use one of the following values:
- %s: for no warning or exception
- %s: for a printed warning
- %s: for a raised exception
Example:
export %s=%s
""") % (
err,
cls._refresh_env_var,
"|".join(quiet),
"|".join(warn),
"|".join(error),
cls._refresh_env_var,
quiet[0])
if mode in warn:
print("WARNING: %s" % err)
else:
raise ImportError(err)
else:
err = dedent("""\
%s environment variable has been set but it has been set with an invalid value.
Use only the following values:
- %s: for no warning or exception
- %s: for a printed warning
- %s: for a raised exception
""") % (
cls._refresh_env_var,
"|".join(quiet),
"|".join(warn),
"|".join(error))
raise ImportError(err)
# we get here if this was the init refresh and the refresh mode
# was not error, go ahead and set the GIT_PYTHON_GIT_EXECUTABLE
# such that we discern the difference between a first import
# and a second import
cls.GIT_PYTHON_GIT_EXECUTABLE = cls.git_exec_name
else:
# after the first refresh (when GIT_PYTHON_GIT_EXECUTABLE
# is no longer None) we raise an exception
raise GitCommandNotFound("git", err)
return has_git
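    # Illustrative notes, not from the original source: the behaviour of the initial refresh
    # can be chosen before importing git (e.g. `export GIT_PYTHON_REFRESH=quiet`), and a
    # specific binary can be selected later with `Git.refresh("/usr/local/bin/git")`.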
@classmethod
def is_cygwin(cls) -> bool:
return is_cygwin_git(cls.GIT_PYTHON_GIT_EXECUTABLE)
@overload
@classmethod
def polish_url(cls, url: str, is_cygwin: Literal[False] = ...) -> str:
...
@overload
@classmethod
def polish_url(cls, url: str, is_cygwin: Union[None, bool] = None) -> str:
...
@classmethod
def polish_url(cls, url: str, is_cygwin: Union[None, bool] = None) -> PathLike:
if is_cygwin is None:
is_cygwin = cls.is_cygwin()
if is_cygwin:
url = cygpath(url)
else:
"""Remove any backslahes from urls to be written in config files.
Windows might create config-files containing paths with backslashed,
but git stops liking them as it will escape the backslashes.
Hence we undo the escaping just to be sure.
"""
url = os.path.expandvars(url)
if url.startswith('~'):
url = os.path.expanduser(url)
url = url.replace("\\\\", "\\").replace("\\", "/")
return url
class AutoInterrupt(object):
"""Kill/Interrupt the stored process instance once this instance goes out of scope. It is
used to prevent processes piling up in case iterators stop reading.
Besides all attributes are wired through to the contained process object.
The wait method was overridden to perform automatic status code checking
and possibly raise."""
__slots__ = ("proc", "args")
def __init__(self, proc: Union[None, subprocess.Popen], args: Any) -> None:
self.proc = proc
self.args = args
def __del__(self) -> None:
if self.proc is None:
return
proc = self.proc
self.proc = None
if proc.stdin:
proc.stdin.close()
if proc.stdout:
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
# did the process finish already so we have a return code ?
try:
if proc.poll() is not None:
return None
except OSError as ex:
log.info("Ignored error after process had died: %r", ex)
# can be that nothing really exists anymore ...
if os is None or getattr(os, 'kill', None) is None:
return None
# try to kill it
try:
proc.terminate()
proc.wait() # ensure process goes away
except OSError as ex:
log.info("Ignored error after process had died: %r", ex)
except AttributeError:
# try windows
# for some reason, providing None for stdout/stderr still prints something. This is why
                # we simply use the shell and redirect to nul. It's slower than CreateProcess; the question
                # is whether we really want to see all these messages. It's annoying no matter what.
if is_win:
call(("TASKKILL /F /T /PID %s 2>nul 1>nul" % str(proc.pid)), shell=True)
# END exception handling
def __getattr__(self, attr: str) -> Any:
return getattr(self.proc, attr)
# TODO: Bad choice to mimic `proc.wait()` but with different args.
def wait(self, stderr: Union[None, bytes] = b'') -> int:
"""Wait for the process and return its status code.
:param stderr: Previously read value of stderr, in case stderr is already closed.
:warn: may deadlock if output or error pipes are used and not handled separately.
:raise GitCommandError: if the return status is not 0"""
if stderr is None:
stderr = b''
stderr = force_bytes(data=stderr, encoding='utf-8')
if self.proc is not None:
status = self.proc.wait()
def read_all_from_possibly_closed_stream(stream):
try:
return stderr + force_bytes(stream.read())
except ValueError:
return stderr or b''
if status != 0:
errstr = read_all_from_possibly_closed_stream(self.proc.stderr)
log.debug('AutoInterrupt wait stderr: %r' % (errstr,))
raise GitCommandError(remove_password_if_present(self.args), status, errstr)
# END status handling
return status
# END auto interrupt
class CatFileContentStream(object):
"""Object representing a sized read-only stream returning the contents of
an object.
It behaves like a stream, but counts the data read and simulates an empty
stream once our sized content region is empty.
        If not all data is read to the end of the object's lifetime, we read the
rest to assure the underlying stream continues to work"""
__slots__: Tuple[str, ...] = ('_stream', '_nbr', '_size')
def __init__(self, size: int, stream: IO[bytes]) -> None:
self._stream = stream
self._size = size
self._nbr = 0 # num bytes read
# special case: if the object is empty, has null bytes, get the
# final newline right away.
if size == 0:
stream.read(1)
# END handle empty streams
def read(self, size: int = -1) -> bytes:
bytes_left = self._size - self._nbr
if bytes_left == 0:
return b''
if size > -1:
# assure we don't try to read past our limit
size = min(bytes_left, size)
else:
# they try to read all, make sure its not more than what remains
size = bytes_left
# END check early depletion
data = self._stream.read(size)
self._nbr += len(data)
# check for depletion, read our final byte to make the stream usable by others
if self._size - self._nbr == 0:
self._stream.read(1) # final newline
# END finish reading
return data
def readline(self, size: int = -1) -> bytes:
if self._nbr == self._size:
return b''
# clamp size to lowest allowed value
bytes_left = self._size - self._nbr
if size > -1:
size = min(bytes_left, size)
else:
size = bytes_left
# END handle size
data = self._stream.readline(size)
self._nbr += len(data)
# handle final byte
if self._size - self._nbr == 0:
self._stream.read(1)
# END finish reading
return data
def readlines(self, size: int = -1) -> List[bytes]:
if self._nbr == self._size:
return []
# leave all additional logic to our readline method, we just check the size
out = []
nbr = 0
while True:
line = self.readline()
if not line:
break
out.append(line)
if size > -1:
nbr += len(line)
if nbr > size:
break
# END handle size constraint
# END readline loop
return out
# skipcq: PYL-E0301
def __iter__(self) -> 'Git.CatFileContentStream':
return self
def __next__(self) -> bytes:
return self.next()
def next(self) -> bytes:
line = self.readline()
if not line:
raise StopIteration
return line
def __del__(self) -> None:
bytes_left = self._size - self._nbr
if bytes_left:
# read and discard - seeking is impossible within a stream
# includes terminating newline
self._stream.read(bytes_left + 1)
# END handle incomplete read
def __init__(self, working_dir: Union[None, PathLike] = None):
"""Initialize this instance with:
:param working_dir:
Git directory we should work in. If None, we always work in the current
directory as returned by os.getcwd().
It is meant to be the working tree directory if available, or the
.git directory in case of bare repositories."""
super(Git, self).__init__()
self._working_dir = expand_path(working_dir)
self._git_options = () # type: Union[List[str], Tuple[str, ...]]
self._persistent_git_options = [] # type: List[str]
# Extra environment variables to pass to git commands
self._environment = {} # type: Dict[str, str]
# cached command slots
self.cat_file_header = None
self.cat_file_all = None
def __getattr__(self, name: str) -> Any:
"""A convenience method as it allows to call the command as if it was
an object.
        :return: Callable object that will execute _call_process with your arguments."""
if name[0] == '_':
return LazyMixin.__getattr__(self, name)
return lambda *args, **kwargs: self._call_process(name, *args, **kwargs)
def set_persistent_git_options(self, **kwargs: Any) -> None:
"""Specify command line options to the git executable
for subsequent subcommand calls
:param kwargs:
is a dict of keyword arguments.
these arguments are passed as in _call_process
but will be passed to the git command rather than
the subcommand.
"""
self._persistent_git_options = self.transform_kwargs(
split_single_char_options=True, **kwargs)
def _set_cache_(self, attr: str) -> None:
if attr == '_version_info':
# We only use the first 4 numbers, as everything else could be strings in fact (on windows)
process_version = self._call_process('version') # should be as default *args and **kwargs used
version_numbers = process_version.split(' ')[2]
self._version_info = tuple(
int(n) for n in version_numbers.split('.')[:4] if n.isdigit()
) # type: Tuple[int, int, int, int] # type: ignore
else:
super(Git, self)._set_cache_(attr)
# END handle version info
@property
def working_dir(self) -> Union[None, PathLike]:
""":return: Git directory we are working on"""
return self._working_dir
@property
def version_info(self) -> Tuple[int, int, int, int]:
"""
:return: tuple(int, int, int, int) tuple with integers representing the major, minor
and additional version numbers as parsed from git version.
This value is generated on demand and is cached"""
return self._version_info
@overload
def execute(self,
command: Union[str, Sequence[Any]],
*,
as_process: Literal[True]
) -> 'AutoInterrupt':
...
@overload
def execute(self,
command: Union[str, Sequence[Any]],
*,
as_process: Literal[False] = False,
stdout_as_string: Literal[True]
) -> Union[str, Tuple[int, str, str]]:
...
@overload
def execute(self,
command: Union[str, Sequence[Any]],
*,
as_process: Literal[False] = False,
stdout_as_string: Literal[False] = False
) -> Union[bytes, Tuple[int, bytes, str]]:
...
@overload
def execute(self,
command: Union[str, Sequence[Any]],
*,
with_extended_output: Literal[False],
as_process: Literal[False],
stdout_as_string: Literal[True]
) -> str:
...
@overload
def execute(self,
command: Union[str, Sequence[Any]],
*,
with_extended_output: Literal[False],
as_process: Literal[False],
stdout_as_string: Literal[False]
) -> bytes:
...
def execute(self,
command: Union[str, Sequence[Any]],
istream: Union[None, BinaryIO] = None,
with_extended_output: bool = False,
with_exceptions: bool = True,
as_process: bool = False,
output_stream: Union[None, BinaryIO] = None,
stdout_as_string: bool = True,
kill_after_timeout: Union[None, int] = None,
with_stdout: bool = True,
universal_newlines: bool = False,
shell: Union[None, bool] = None,
env: Union[None, Mapping[str, str]] = None,
max_chunk_size: int = io.DEFAULT_BUFFER_SIZE,
**subprocess_kwargs: Any
) -> Union[str, bytes, Tuple[int, Union[str, bytes], str], AutoInterrupt]:
"""Handles executing the command on the shell and consumes and returns
the returned information (stdout)
:param command:
The command argument list to execute.
It should be a string, or a sequence of program arguments. The
program to execute is the first item in the args sequence or string.
:param istream:
Standard input filehandle passed to subprocess.Popen.
:param with_extended_output:
Whether to return a (status, stdout, stderr) tuple.
:param with_exceptions:
Whether to raise an exception when git returns a non-zero status.
:param as_process:
Whether to return the created process instance directly from which
streams can be read on demand. This will render with_extended_output and
with_exceptions ineffective - the caller will have
to deal with the details himself.
It is important to note that the process will be placed into an AutoInterrupt
wrapper that will interrupt the process once it goes out of scope. If you
use the command in iterators, you should pass the whole process instance
instead of a single stream.
:param output_stream:
If set to a file-like object, data produced by the git command will be
output to the given stream directly.
This feature only has any effect if as_process is False. Processes will
always be created with a pipe due to issues with subprocess.
This merely is a workaround as data will be copied from the
output pipe to the given output stream directly.
Judging from the implementation, you shouldn't use this flag !
:param stdout_as_string:
if False, the commands standard output will be bytes. Otherwise, it will be
decoded into a string using the default encoding (usually utf-8).
The latter can fail, if the output contains binary data.
:param env:
A dictionary of environment variables to be passed to `subprocess.Popen`.
:param max_chunk_size:
Maximum number of bytes in one chunk of data passed to the output_stream in
one invocation of write() method. If the given number is not positive then
the default value is used.
:param subprocess_kwargs:
Keyword arguments to be passed to subprocess.Popen. Please note that
some of the valid kwargs are already set by this method, the ones you
specify may not be the same ones.
:param with_stdout: If True, default True, we open stdout on the created process
:param universal_newlines:
if True, pipes will be opened as text, and lines are split at
all known line endings.
:param shell:
Whether to invoke commands through a shell (see `Popen(..., shell=True)`).
It overrides :attr:`USE_SHELL` if it is not `None`.
:param kill_after_timeout:
To specify a timeout in seconds for the git command, after which the process
should be killed. This will have no effect if as_process is set to True. It is
set to None by default and will let the process run until the timeout is
explicitly specified. This feature is not supported on Windows. It's also worth
noting that kill_after_timeout uses SIGKILL, which can have negative side
effects on a repository. For example, stale locks in case of git gc could
render the repository incapable of accepting changes until the lock is manually
removed.
:return:
* str(output) if extended_output = False (Default)
* tuple(int(status), str(stdout), str(stderr)) if extended_output = True
if output_stream is True, the stdout value will be your output stream:
* output_stream if extended_output = False
* tuple(int(status), output_stream, str(stderr)) if extended_output = True
Note git is executed with LC_MESSAGES="C" to ensure consistent
output regardless of system language.
:raise GitCommandError:
:note:
If you add additional keyword arguments to the signature of this method,
you must update the execute_kwargs tuple housed in this module."""
# Remove password for the command if present
redacted_command = remove_password_if_present(command)
if self.GIT_PYTHON_TRACE and (self.GIT_PYTHON_TRACE != 'full' or as_process):
log.info(' '.join(redacted_command))
# Allow the user to have the command executed in their working dir.
cwd = self._working_dir or os.getcwd()
# Start the process
inline_env = env
env = os.environ.copy()
# Attempt to force all output to plain ascii english, which is what some parsing code
# may expect.
# According to stackoverflow (http://goo.gl/l74GC8), we are setting LANGUAGE as well
# just to be sure.
env["LANGUAGE"] = "C"
env["LC_ALL"] = "C"
env.update(self._environment)
if inline_env is not None:
env.update(inline_env)
if is_win:
cmd_not_found_exception = OSError
if kill_after_timeout:
raise GitCommandError(redacted_command, '"kill_after_timeout" feature is not supported on Windows.')
else:
if sys.version_info[0] > 2:
cmd_not_found_exception = FileNotFoundError # NOQA # exists, flake8 unknown @UndefinedVariable
else:
cmd_not_found_exception = OSError
# end handle
stdout_sink = (PIPE
if with_stdout
else getattr(subprocess, 'DEVNULL', None) or open(os.devnull, 'wb'))
istream_ok = "None"
if istream:
istream_ok = "<valid stream>"
log.debug("Popen(%s, cwd=%s, universal_newlines=%s, shell=%s, istream=%s)",
redacted_command, cwd, universal_newlines, shell, istream_ok)
try:
proc = Popen(command,
env=env,
cwd=cwd,
bufsize=-1,
stdin=istream,
stderr=PIPE,
stdout=stdout_sink,
shell=shell is not None and shell or self.USE_SHELL,
close_fds=is_posix, # unsupported on windows
universal_newlines=universal_newlines,
creationflags=PROC_CREATIONFLAGS,
**subprocess_kwargs
)
except cmd_not_found_exception as err:
raise GitCommandNotFound(redacted_command, err) from err
else:
proc = cast(Popen, proc)
proc.stdout = cast(BinaryIO, proc.stdout)
proc.stderr = cast(BinaryIO, proc.stderr)
if as_process:
return self.AutoInterrupt(proc, command)
def _kill_process(pid: int) -> None:
""" Callback method to kill a process. """
p = Popen(['ps', '--ppid', str(pid)], stdout=PIPE,
creationflags=PROC_CREATIONFLAGS)
child_pids = []
if p.stdout is not None:
for line in p.stdout:
if len(line.split()) > 0:
local_pid = (line.split())[0]
if local_pid.isdigit():
child_pids.append(int(local_pid))
try:
# Windows does not have SIGKILL, so use SIGTERM instead
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
os.kill(pid, sig)
for child_pid in child_pids:
try:
os.kill(child_pid, sig)
except OSError:
pass
kill_check.set() # tell the main routine that the process was killed
except OSError:
# It is possible that the process gets completed in the duration after timeout
# happens and before we try to kill the process.
pass
return
# end
if kill_after_timeout:
kill_check = threading.Event()
watchdog = threading.Timer(kill_after_timeout, _kill_process, args=(proc.pid,))
# Wait for the process to return
status = 0
stdout_value = b'' # type: Union[str, bytes]
stderr_value = b'' # type: Union[str, bytes]
newline = "\n" if universal_newlines else b"\n"
try:
if output_stream is None:
if kill_after_timeout:
watchdog.start()
stdout_value, stderr_value = proc.communicate()
if kill_after_timeout:
watchdog.cancel()
if kill_check.is_set():
stderr_value = ('Timeout: the command "%s" did not complete in %d '
'secs.' % (" ".join(redacted_command), kill_after_timeout))
if not universal_newlines:
stderr_value = stderr_value.encode(defenc)
# strip trailing "\n"
if stdout_value.endswith(newline): # type: ignore
stdout_value = stdout_value[:-1]
if stderr_value.endswith(newline): # type: ignore
stderr_value = stderr_value[:-1]
status = proc.returncode
else:
max_chunk_size = max_chunk_size if max_chunk_size and max_chunk_size > 0 else io.DEFAULT_BUFFER_SIZE
stream_copy(proc.stdout, output_stream, max_chunk_size)
stdout_value = proc.stdout.read()
stderr_value = proc.stderr.read()
# strip trailing "\n"
if stderr_value.endswith(newline): # type: ignore
stderr_value = stderr_value[:-1]
status = proc.wait()
# END stdout handling
finally:
proc.stdout.close()
proc.stderr.close()
if self.GIT_PYTHON_TRACE == 'full':
cmdstr = " ".join(redacted_command)
def as_text(stdout_value):
return not output_stream and safe_decode(stdout_value) or '<OUTPUT_STREAM>'
# end
if stderr_value:
log.info("%s -> %d; stdout: '%s'; stderr: '%s'",
cmdstr, status, as_text(stdout_value), safe_decode(stderr_value))
elif stdout_value:
log.info("%s -> %d; stdout: '%s'", cmdstr, status, as_text(stdout_value))
else:
log.info("%s -> %d", cmdstr, status)
# END handle debug printing
if with_exceptions and status != 0:
raise GitCommandError(redacted_command, status, stderr_value, stdout_value)
if isinstance(stdout_value, bytes) and stdout_as_string: # could also be output_stream
stdout_value = safe_decode(stdout_value)
# Allow access to the command's status code
if with_extended_output:
return (status, stdout_value, safe_decode(stderr_value))
else:
return stdout_value
def environment(self):
return self._environment
def update_environment(self, **kwargs):
"""
Set environment variables for future git invocations. Return all changed
values in a format that can be passed back into this function to revert
the changes:
``Examples``::
old_env = self.update_environment(PWD='/tmp')
self.update_environment(**old_env)
:param kwargs: environment variables to use for git processes
:return: dict that maps environment variables to their old values
"""
old_env = {}
for key, value in kwargs.items():
# set value if it is None
if value is not None:
old_env[key] = self._environment.get(key)
self._environment[key] = value
# remove key from environment if its value is None
elif key in self._environment:
old_env[key] = self._environment[key]
del self._environment[key]
return old_env
@contextmanager
def custom_environment(self, **kwargs):
"""
A context manager around the above ``update_environment`` method to restore the
environment back to its previous state after operation.
``Examples``::
with self.custom_environment(GIT_SSH='/bin/ssh_wrapper'):
repo.remotes.origin.fetch()
:param kwargs: see update_environment
"""
old_env = self.update_environment(**kwargs)
try:
yield
finally:
self.update_environment(**old_env)
def transform_kwarg(self, name: str, value: Any, split_single_char_options: bool) -> List[str]:
if len(name) == 1:
if value is True:
return ["-%s" % name]
elif value not in (False, None):
if split_single_char_options:
return ["-%s" % name, "%s" % value]
else:
return ["-%s%s" % (name, value)]
else:
if value is True:
return ["--%s" % dashify(name)]
elif value is not False and value is not None:
return ["--%s=%s" % (dashify(name), value)]
return []
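    # Illustrative mapping (assumed, not taken from the original source):
    #   transform_kwargs(max_count=10, p=True) -> ['--max-count=10', '-p']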
def transform_kwargs(self, split_single_char_options: bool = True, **kwargs: Any) -> List[str]:
"""Transforms Python style kwargs into git command line options."""
args = []
for k, v in kwargs.items():
if isinstance(v, (list, tuple)):
for value in v:
args += self.transform_kwarg(k, value, split_single_char_options)
else:
args += self.transform_kwarg(k, v, split_single_char_options)
return args
@classmethod
def __unpack_args(cls, arg_list: Sequence[str]) -> List[str]:
if not isinstance(arg_list, (list, tuple)):
return [str(arg_list)]
outlist = []
for arg in arg_list:
if isinstance(arg_list, (list, tuple)):
outlist.extend(cls.__unpack_args(arg))
# END recursion
else:
outlist.append(str(arg))
# END for each arg
return outlist
def __call__(self, **kwargs: Any) -> 'Git':
"""Specify command line options to the git executable
for a subcommand call
:param kwargs:
is a dict of keyword arguments.
these arguments are passed as in _call_process
but will be passed to the git command rather than
the subcommand.
``Examples``::
git(work_tree='/tmp').difftool()"""
self._git_options = self.transform_kwargs(
split_single_char_options=True, **kwargs)
return self
@overload
def _call_process(self, method: str, *args: None, **kwargs: None
) -> str:
... # if no args given, execute called with all defaults
@overload
def _call_process(self, method: str, *args: Any, **kwargs: Any
) -> Union[str, bytes, Tuple[int, Union[str, bytes], str], 'Git.AutoInterrupt']:
...
def _call_process(self, method: str, *args: Any, **kwargs: Any
) -> Union[str, bytes, Tuple[int, Union[str, bytes], str], 'Git.AutoInterrupt']:
"""Run the given git command with the specified arguments and return
the result as a String
:param method:
is the command. Contained "_" characters will be converted to dashes,
such as in 'ls_files' to call 'ls-files'.
:param args:
is the list of arguments. If None is included, it will be pruned.
This allows your commands to call git more conveniently as None
is realized as non-existent
:param kwargs:
It contains key-values for the following:
- the :meth:`execute()` kwds, as listed in :var:`execute_kwargs`;
- "command options" to be converted by :meth:`transform_kwargs()`;
- the `'insert_kwargs_after'` key which its value must match one of ``*args``,
and any cmd-options will be appended after the matched arg.
Examples::
git.rev_list('master', max_count=10, header=True)
turns into::
            git rev-list --max-count=10 --header master
:return: Same as ``execute``
if no args given used execute default (esp. as_process = False, stdout_as_string = True)
and return str """
# Handle optional arguments prior to calling transform_kwargs
# otherwise these'll end up in args, which is bad.
exec_kwargs = {k: v for k, v in kwargs.items() if k in execute_kwargs}
opts_kwargs = {k: v for k, v in kwargs.items() if k not in execute_kwargs}
insert_after_this_arg = opts_kwargs.pop('insert_kwargs_after', None)
# Prepare the argument list
opt_args = self.transform_kwargs(**opts_kwargs)
ext_args = self.__unpack_args([a for a in args if a is not None])
if insert_after_this_arg is None:
args_list = opt_args + ext_args
else:
try:
index = ext_args.index(insert_after_this_arg)
except ValueError as err:
raise ValueError("Couldn't find argument '%s' in args %s to insert cmd options after"
% (insert_after_this_arg, str(ext_args))) from err
# end handle error
args_list = ext_args[:index + 1] + opt_args + ext_args[index + 1:]
# end handle opts_kwargs
call = [self.GIT_PYTHON_GIT_EXECUTABLE]
# add persistent git options
call.extend(self._persistent_git_options)
# add the git options, then reset to empty
# to avoid side_effects
call.extend(self._git_options)
self._git_options = ()
call.append(dashify(method))
call.extend(args_list)
return self.execute(call, **exec_kwargs)
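    # Usage sketch of insert_kwargs_after (hypothetical call): with
    # git.rev_list('master', max_count=10, insert_kwargs_after='master'),
    # the option list is spliced in right after 'master', producing
    # 'git rev-list master --max-count=10' instead of the default
    # 'git rev-list --max-count=10 master'.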
def _parse_object_header(self, header_line: str) -> Tuple[str, str, int]:
"""
:param header_line:
<hex_sha> type_string size_as_int
:return: (hex_sha, type_string, size_as_int)
:raise ValueError: if the header contains indication for an error due to
incorrect input sha"""
tokens = header_line.split()
if len(tokens) != 3:
if not tokens:
raise ValueError("SHA could not be resolved, git returned: %r" % (header_line.strip()))
else:
raise ValueError("SHA %s could not be resolved, git returned: %r" % (tokens[0], header_line.strip()))
# END handle actual return value
# END error handling
if len(tokens[0]) != 40:
raise ValueError("Failed to parse header: %r" % header_line)
return (tokens[0], tokens[1], int(tokens[2]))
def _prepare_ref(self, ref: AnyStr) -> bytes:
# required for command to separate refs on stdin, as bytes
if isinstance(ref, bytes):
# Assume 40 bytes hexsha - bin-to-ascii for some reason returns bytes, not text
refstr = ref.decode('ascii') # type: str
elif not isinstance(ref, str):
refstr = str(ref) # could be ref-object
else:
refstr = ref
if not refstr.endswith("\n"):
refstr += "\n"
return refstr.encode(defenc)
def _get_persistent_cmd(self, attr_name: str, cmd_name: str, *args: Any, **kwargs: Any
) -> Union['Git.AutoInterrupt', TBD]:
cur_val = getattr(self, attr_name)
if cur_val is not None:
return cur_val
options = {"istream": PIPE, "as_process": True}
options.update(kwargs)
cmd = self._call_process(cmd_name, *args, **options)
setattr(self, attr_name, cmd)
return cmd
def __get_object_header(self, cmd, ref: AnyStr) -> Tuple[str, str, int]:
cmd.stdin.write(self._prepare_ref(ref))
cmd.stdin.flush()
return self._parse_object_header(cmd.stdout.readline())
def get_object_header(self, ref: str) -> Tuple[str, str, int]:
""" Use this method to quickly examine the type and size of the object behind
the given ref.
:note: The method will only suffer from the costs of command invocation
once and reuses the command in subsequent calls.
:return: (hexsha, type_string, size_as_int)"""
cmd = self._get_persistent_cmd("cat_file_header", "cat_file", batch_check=True)
return self.__get_object_header(cmd, ref)
def get_object_data(self, ref: str) -> Tuple[str, str, int, bytes]:
""" As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe"""
hexsha, typename, size, stream = self.stream_object_data(ref)
data = stream.read(size)
        del stream
return (hexsha, typename, size, data)
def stream_object_data(self, ref: str) -> Tuple[str, str, int, 'Git.CatFileContentStream']:
""" As get_object_header, but returns the data as a stream
:return: (hexsha, type_string, size_as_int, stream)
:note: This method is not threadsafe, you need one independent Command instance per thread to be safe !"""
cmd = self._get_persistent_cmd("cat_file_all", "cat_file", batch=True)
hexsha, typename, size = self.__get_object_header(cmd, ref)
return (hexsha, typename, size, self.CatFileContentStream(size, cmd.stdout))
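    # Usage sketch (assumes an initialized Git instance bound to a repository;
    # the path below is hypothetical):
    #   g = Git('/path/to/repo')
    #   hexsha, typ, size = g.get_object_header('HEAD')
    #   _, _, size, stream = g.stream_object_data('HEAD')
    #   data = stream.read(size)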
def clear_cache(self) -> 'Git':
"""Clear all kinds of internal caches to release resources.
Currently persistent commands will be interrupted.
:return: self"""
for cmd in (self.cat_file_all, self.cat_file_header):
if cmd:
cmd.__del__()
self.cat_file_all = None
self.cat_file_header = None
return self
|
py | 7df8575e40a37e8b18e5fe9fb7b8ac13e21dedb0 | #!/usr/bin/env python
# Make all randomness deterministic
import random
import argparse
import torch
import os
import numpy as np
random.seed(1337)
torch.manual_seed(1337)
np.random.seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
from shlex import split
from .yolo_train import prepare_training_loop
from . import yolo_train
from .yolo_models import * # set ONNX_EXPORT in models.py
from .yolo_utils.datasets import *
from .yolo_utils.utils import *
from pathlib import Path
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(BenchmarkModel):
task = COMPUTER_VISION.SEGMENTATION
def __init__(self, device=None, jit=False):
super().__init__()
self.device = device
self.jit = jit
def get_module(self):
if self.jit:
raise NotImplementedError()
parser = argparse.ArgumentParser()
root = str(Path(yolo_train.__file__).parent.absolute())
parser.add_argument('--cfg', type=str, default=f'{root}/cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default=f'{root}/data/coco.names', help='*.names path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--source', type=str, default='data/samples', help='source') # input file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
parser.add_argument('--half', action='store_true', help='half precision FP16 inference')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
opt = parser.parse_args(['--device', self.device])
opt.cfg = check_file(opt.cfg) # check file
opt.names = check_file(opt.names) # check file
model = Darknet(opt.cfg, opt.img_size)
model.to(opt.device).eval()
input = (torch.rand(1, 3, 384, 512).to(opt.device),)
return model, input
def set_train(self):
# another model instance is used for training
# and the train mode is on by default
pass
def train(self, niterations=1):
# the training process is not patched to use scripted models
if self.jit:
raise NotImplementedError()
if self.device == 'cpu':
raise NotImplementedError("Disabled due to excessively slow runtime - see GH Issue #100")
root = str(Path(yolo_train.__file__).parent.absolute())
train_args = split(f"--data {root}/data/coco128.data --img 416 --batch 8 --nosave --notest --epochs 1 --device {self.device} --weights ''")
print(train_args)
training_loop = prepare_training_loop(train_args)
return training_loop(niterations)
def eval(self, niterations=1):
model, example_inputs = self.get_module()
img = example_inputs[0]
im0s_shape = (480, 640, 3)
for i in range(niterations):
pred = model(img, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, 0.3, 0.6,
multi_label=False, classes=None, agnostic=False)
if __name__ == '__main__':
m = Model(device='cpu', jit=False)
model, example_inputs = m.get_module()
model(*example_inputs)
m.train()
m.eval()
|
py | 7df858cda696e66d5e84cb08aaf4e5c4cd119379 | """Auto-generated file, do not edit by hand. SM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_SM = PhoneMetadata(id='SM', country_code=378, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[05-7]\\d{7,9}', possible_length=(8, 10), possible_length_local_only=(6,)),
fixed_line=PhoneNumberDesc(national_number_pattern='0549(?:8[0157-9]|9\\d)\\d{4}', example_number='0549886377', possible_length=(10,), possible_length_local_only=(6,)),
mobile=PhoneNumberDesc(national_number_pattern='6[16]\\d{6}', example_number='66661212', possible_length=(8,)),
premium_rate=PhoneNumberDesc(national_number_pattern='7[178]\\d{6}', example_number='71123456', possible_length=(8,)),
voip=PhoneNumberDesc(national_number_pattern='5[158]\\d{6}', example_number='58001110', possible_length=(8,)),
national_prefix_for_parsing='(?:0549)?([89]\\d{5})',
national_prefix_transform_rule='0549\\1',
number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[5-7]']),
NumberFormat(pattern='(0549)(\\d{6})', format='\\1 \\2', leading_digits_pattern=['054', '0549']),
NumberFormat(pattern='(\\d{6})', format='0549 \\1', leading_digits_pattern=['[89]'])],
intl_number_format=[NumberFormat(pattern='(\\d{2})(\\d{2})(\\d{2})(\\d{2})', format='\\1 \\2 \\3 \\4', leading_digits_pattern=['[5-7]']),
NumberFormat(pattern='(0549)(\\d{6})', format='(\\1) \\2', leading_digits_pattern=['054', '0549']),
NumberFormat(pattern='(\\d{6})', format='(0549) \\1', leading_digits_pattern=['[89]'])])
|
py | 7df858f06714210e36f3b0607fda8120567d2d95 | import numpy as np
from typing import Callable, Type, NamedTuple, Optional
import copy
import logging
import ConfigSpace as CS
from .debug_log import DebugLogPrinter
from .gp_profiling import GPMXNetSimpleProfiler
from .hp_ranges import HyperparameterRanges_CS
from ..datatypes.common import CandidateEvaluation, Candidate, candidate_for_print, PendingEvaluation
from ..datatypes.hp_ranges import HyperparameterRanges
from ..datatypes.tuning_job_state import TuningJobState
from ..models.gp_model import GaussProcSurrogateModel, GPModel
from ..models.gpmodel_skipopt import SkipOptimizationPredicate
from ..models.gpmodel_transformers import GPModelPendingCandidateStateTransformer, GPModelArgs
from ..tuning_algorithms.base_classes import LocalOptimizer, AcquisitionFunction, ScoringFunction
from ..tuning_algorithms.bo_algorithm import BayesianOptimizationAlgorithm
from ..tuning_algorithms.bo_algorithm_components import IndependentThompsonSampling
from ..tuning_algorithms.common import RandomStatefulCandidateGenerator, compute_blacklisted_candidates
from ..tuning_algorithms.default_algorithm import dictionarize_objective, \
DEFAULT_METRIC, DEFAULT_LOCAL_OPTIMIZER_CLASS, \
DEFAULT_NUM_INITIAL_CANDIDATES, DEFAULT_NUM_INITIAL_RANDOM_EVALUATIONS
from ..utils.duplicate_detector import DuplicateDetectorIdentical
logger = logging.getLogger(__name__)
GET_CONFIG_RANDOM_RETRIES = 50
class MapReward(NamedTuple):
forward: Callable[[float], float]
reverse: Callable[[float], float]
def __call__(self, x: float) -> float:
return self.forward(x)
def accumulate_profiling_record(
cum_records: dict, profiler: GPMXNetSimpleProfiler, pick_random: bool):
# Pull out profiling data from this block
block_id = profiler.id_counter - 1
curr_record = {
r.tag: r.duration for r in profiler.records
if r.id == block_id}
if pick_random:
curr_record['num_random'] = 1
curr_record['total_all'] = curr_record['random']
else:
curr_record['num_model'] = 1
# Sum up entries
for k, v in curr_record.items():
if k in cum_records:
cum_records[k] += v
else:
cum_records[k] = v
SUPPORTED_INITIAL_SCORING = {
'thompson_indep',
'acq_func'}
DEFAULT_INITIAL_SCORING = 'thompson_indep'
def create_initial_candidates_scorer(
initial_scoring: str, model: GaussProcSurrogateModel,
acquisition_class: Type[AcquisitionFunction],
random_state: np.random.RandomState) -> ScoringFunction:
if initial_scoring == 'thompson_indep':
return IndependentThompsonSampling(model, random_state=random_state)
else:
return acquisition_class(model)
def check_initial_candidates_scorer(initial_scoring: str) -> str:
if initial_scoring is None:
return DEFAULT_INITIAL_SCORING
else:
assert initial_scoring in SUPPORTED_INITIAL_SCORING, \
"initial_scoring = '{}' is not supported".format(
initial_scoring)
return initial_scoring
class GPFIFOSearcher(object):
"""
Supports standard GP-based hyperparameter optimization, when used with a
FIFO scheduler.
"""
def __init__(
self, hp_ranges: HyperparameterRanges, random_seed: int,
gpmodel: GPModel, model_args: GPModelArgs,
map_reward: MapReward,
acquisition_class: Type[AcquisitionFunction],
init_state: TuningJobState = None,
local_minimizer_class: Type[LocalOptimizer] = DEFAULT_LOCAL_OPTIMIZER_CLASS,
skip_optimization: SkipOptimizationPredicate = None,
num_initial_candidates: int = DEFAULT_NUM_INITIAL_CANDIDATES,
num_initial_random_choices: int = DEFAULT_NUM_INITIAL_RANDOM_EVALUATIONS,
initial_scoring: Optional[str] = None,
profiler: Optional[GPMXNetSimpleProfiler] = None,
first_is_default: bool = True,
debug_log: Optional[DebugLogPrinter] = None):
"""
Note that the SurrogateModel is created on demand (by the state
transformer) in get_config, along with components needed for the BO
algorithm.
        The searcher is supposed to maximize reward, while internally, the
        criterion is minimized. map_reward maps reward to the internal
        criterion; it must be strictly decreasing.
:param hp_ranges: Configuration space without resource attribute
:param random_seed:
:param gpmodel: GP regression model
:param model_args: Arguments for GPMXNet model creation
:param map_reward: Function mapping reward to criterion to be minimized
:param acquisition_class: Type for acquisition function
:param init_state: TuningJobState to start from (default is empty)
:param local_minimizer_class: Type for local minimizer
:param skip_optimization: Predicate, see
GPMXNetPendingCandidateStateTransformer
:param num_initial_candidates: See BayesianOptimizationAlgorithm
:param num_initial_random_choices: Configs are sampled at random until
this many candidates received label feedback
:param initial_scoring: Scoring function to rank initial candidates.
Default: thompson_indep (independent Thompson sampling)
:param profiler: If given, HPO computations are profiled
:param first_is_default: If true, the first config to be evaluated is
the default of the search space. Otherwise, the first is sampled
at random
:param debug_log: DebugLogPrinter for debug logging (optional)
"""
self.hp_ranges = hp_ranges
self.random_seed = random_seed
self.num_initial_candidates = num_initial_candidates
self.num_initial_random_choices = num_initial_random_choices
self.map_reward = map_reward
self.local_minimizer_class = local_minimizer_class
self.acquisition_class = acquisition_class
self.debug_log = debug_log
self.initial_scoring = check_initial_candidates_scorer(initial_scoring)
# Create state transformer
# Initial state is empty (note that the state is mutable)
if init_state is None:
init_state = TuningJobState(
hp_ranges=hp_ranges,
candidate_evaluations=[],
failed_candidates=[],
pending_evaluations=[])
else:
assert hp_ranges is init_state.hp_ranges, \
"hp_ranges and init_state.hp_ranges must be same object"
self.state_transformer = GPModelPendingCandidateStateTransformer(
gpmodel=gpmodel,
init_state=init_state,
model_args=model_args,
skip_optimization=skip_optimization,
profiler=profiler,
debug_log=debug_log)
self.random_state = np.random.RandomState(random_seed)
self.random_generator = RandomStatefulCandidateGenerator(
hp_ranges, random_state=self.random_state)
self.profiler = profiler
self.do_profile = (profiler is not None)
self.first_is_default = first_is_default
if first_is_default:
assert isinstance(hp_ranges, HyperparameterRanges_CS), \
"If first_is_default, must have hp_ranges of HyperparameterRanges_CS type"
if debug_log is not None:
assert isinstance(hp_ranges, HyperparameterRanges_CS), \
"If debug_log is given, must have hp_ranges of HyperparameterRanges_CS type"
# Sums up profiling records across all get_config calls
self._profile_record = dict()
if debug_log is not None:
deb_msg = "[GPFIFOSearcher.__init__]\n"
deb_msg += ("- acquisition_class = {}\n".format(acquisition_class))
deb_msg += ("- local_minimizer_class = {}\n".format(local_minimizer_class))
deb_msg += ("- num_initial_candidates = {}\n".format(num_initial_candidates))
deb_msg += ("- num_initial_random_choices = {}\n".format(num_initial_random_choices))
deb_msg += ("- initial_scoring = {}\n".format(self.initial_scoring))
deb_msg += ("- first_is_default = {}".format(first_is_default))
logger.info(deb_msg)
def update(self, config: Candidate, reward: float):
"""
Registers new datapoint at config, with reward reward.
Note that in general, config should previously have been registered as
pending (register_pending). If so, it is switched from pending
to labeled. If not, it is considered directly labeled.
:param config:
:param reward:
"""
crit_val = self.map_reward(reward)
self.state_transformer.label_candidate(CandidateEvaluation(
candidate=copy.deepcopy(config),
metrics=dictionarize_objective(crit_val)))
if self.debug_log is not None:
config_id = self.debug_log.config_id(config)
msg = "Update for config_id {}: reward = {}, crit_val = {}".format(
config_id, reward, crit_val)
logger.info(msg)
def register_pending(self, config: Candidate):
"""
Registers config as pending. This means the corresponding evaluation
task is running. Once it finishes, update is called for config.
"""
# It is OK for the candidate already to be registered as pending, in
# which case we do nothing
state = self.state_transformer.state
if config not in state.pending_candidates:
if config in (x.candidate for x in state.candidate_evaluations):
evals = state.candidate_evaluations
num_labeled = len(evals)
pos_cand = next(
i for i, x in enumerate(evals) if x.candidate == config)
error_msg = """
This configuration is already registered as labeled:
Position of labeled candidate: {} of {}
Label value: {}
""".format(
pos_cand, num_labeled,
evals[pos_cand].metrics[DEFAULT_METRIC])
assert False, error_msg
self.state_transformer.append_candidate(config)
def get_config(self) -> Candidate:
"""
Runs Bayesian optimization in order to suggest the next config to evaluate.
:return: Next config to evaluate at
"""
state = self.state_transformer.state
if self.do_profile:
fit_hyperparams = not self.state_transformer.skip_optimization(
state)
self.profiler.set_state(state, fit_hyperparams)
blacklisted_candidates = compute_blacklisted_candidates(state)
pick_random = (len(blacklisted_candidates) < self.num_initial_random_choices) or \
(not state.candidate_evaluations)
if self.debug_log is not None:
self.debug_log.start_get_config('random' if pick_random else 'BO')
if pick_random:
config = None
if self.first_is_default and (not blacklisted_candidates):
# Use default configuration if there is one specified
default_config = self.hp_ranges.config_space.get_default_configuration()
if default_config and len(default_config.get_dictionary()) > 0:
config = default_config
if self.debug_log is not None:
logger.info("Start with default config:\n{}".format(
candidate_for_print(config)))
if config is None:
if self.do_profile:
self.profiler.start('random')
for _ in range(GET_CONFIG_RANDOM_RETRIES):
_config = self.hp_ranges.random_candidate(self.random_state)
if _config not in blacklisted_candidates:
config = _config
break
if config is None:
raise AssertionError(
"Failed to sample a configuration not already chosen "
"before. Maybe there are no free configurations left? "
"The blacklist size is {}".format(len(blacklisted_candidates)))
if self.do_profile:
self.profiler.stop('random')
else:
# Obtain current SurrogateModel from state transformer. Based on
# this, the BO algorithm components can be constructed
state = self.state_transformer.state
if self.do_profile:
self.profiler.start('total_all')
self.profiler.start('total_update')
# Note: Asking for the model triggers the posterior computation
model = self.state_transformer.model()
if self.do_profile:
self.profiler.stop('total_update')
# Create BO algorithm
initial_candidates_scorer = create_initial_candidates_scorer(
self.initial_scoring, model, self.acquisition_class,
self.random_state)
local_optimizer = self.local_minimizer_class(
state, model, self.acquisition_class)
# Make sure not to use the same random seed for each call:
#random_seed = compute_random_seed({'0': state}, self.random_seed)
bo_algorithm = BayesianOptimizationAlgorithm(
initial_candidates_generator=self.random_generator,
initial_candidates_scorer=initial_candidates_scorer,
num_initial_candidates=self.num_initial_candidates,
local_optimizer=local_optimizer,
pending_candidate_state_transformer=None,
blacklisted_candidates=blacklisted_candidates,
num_requested_candidates=1,
greedy_batch_selection=False,
duplicate_detector=DuplicateDetectorIdentical(),
profiler=self.profiler,
sample_unique_candidates=False,
debug_log=self.debug_log)
# Next candidate decision
if self.do_profile:
self.profiler.start('total_nextcand')
_config = bo_algorithm.next_candidates()
if len(_config) == 0:
raise AssertionError(
"Failed to find a configuration not already chosen "
"before. Maybe there are no free configurations left? "
"The blacklist size is {}".format(len(blacklisted_candidates)))
config = _config[0]
if self.do_profile:
self.profiler.stop('total_nextcand')
if self.do_profile:
self.profiler.stop('total_all')
if self.debug_log is not None:
self.debug_log.set_final_config(config)
# All get_config debug log info is only written here
self.debug_log.write_block()
if self.do_profile:
self.profiler.clear()
# Pull out profiling data from this block, add to _profile_record
accumulate_profiling_record(
self._profile_record, self.profiler, pick_random)
return config
def evaluation_failed(self, config: Candidate):
# Remove pending candidate
self.state_transformer.drop_candidate(config)
# Mark config as failed (which means it will be blacklisted in
# future get_config calls)
self.state_transformer.mark_candidate_failed(config)
def dataset_size(self):
return len(self.state_transformer.state.candidate_evaluations)
def cumulative_profile_record(self):
"""
If profiling is activated, we sum up the profiling blocks for each
call of get_config and return it as dict. See get_config for what
is recorded:
- num_random: Number of get_config calls with random selection
- num_model: Number of get_config calls with model-based selection
- total_all: Sum of total times for all get_config calls
"""
return self._profile_record
def get_params(self):
"""
Note: Once MCMC is supported, this method will have to be refactored.
:return: Dictionary with current hyperparameter values
"""
return self.state_transformer.get_params()
def set_params(self, param_dict):
self.state_transformer.set_params(param_dict)
def get_state(self):
"""
The mutable state consists of the GP model parameters, the
TuningJobState, and the skip_optimization predicate (which can have a
mutable state).
We assume that skip_optimization can be pickled.
"""
state = {
'model_params': self.get_params(),
'state': encode_state(self.state_transformer.state),
'skip_optimization': self.state_transformer.skip_optimization,
'random_state': self.random_state}
if self.debug_log is not None:
state['debug_log'] = self.debug_log.get_mutable_state()
return state
def clone_from_state(self, state):
# Create clone with mutable state taken from 'state'
model_args = self.state_transformer._model_args
init_state = decode_state(state['state'], self.hp_ranges)
skip_optimization = state['skip_optimization']
new_searcher = GPFIFOSearcher(
hp_ranges=self.hp_ranges,
random_seed=self.random_seed,
gpmodel=self.state_transformer._gpmodel,
model_args=model_args,
map_reward=self.map_reward,
acquisition_class=self.acquisition_class,
init_state=init_state,
local_minimizer_class=self.local_minimizer_class,
skip_optimization=skip_optimization,
num_initial_candidates=self.num_initial_candidates,
num_initial_random_choices=self.num_initial_random_choices,
initial_scoring=self.initial_scoring,
profiler=self.profiler,
first_is_default=self.first_is_default,
debug_log=self.debug_log)
new_searcher.state_transformer.set_params(state['model_params'])
new_searcher.random_state = state['random_state']
new_searcher.random_generator.random_state = \
state['random_state']
if self.debug_log and 'debug_log' in state:
new_searcher.debug_log.set_mutable_state(state['debug_log'])
# Invalidate self (must not be used afterwards)
self.state_transformer = None
return new_searcher
def encode_state(state: TuningJobState) -> dict:
assert isinstance(state.hp_ranges, HyperparameterRanges_CS), \
"Must have hp_ranges of HyperparameterRanges_CS type"
candidate_evaluations = [
{'candidate': eval.candidate.get_dictionary(),
'metrics': eval.metrics}
for eval in state.candidate_evaluations]
failed_candidates = [x.get_dictionary() for x in state.failed_candidates]
pending_evaluations = [
eval.candidate.get_dictionary() for eval in state.pending_evaluations]
return {
'candidate_evaluations': candidate_evaluations,
'failed_candidates': failed_candidates,
'pending_evaluations': pending_evaluations}
def decode_state(enc_state: dict, hp_ranges: HyperparameterRanges_CS) \
-> TuningJobState:
assert isinstance(hp_ranges, HyperparameterRanges_CS), \
"Must have hp_ranges of HyperparameterRanges_CS type"
config_space = hp_ranges.config_space
def to_cs(x):
return CS.Configuration(config_space, values=x)
candidate_evaluations = [
CandidateEvaluation(to_cs(x['candidate']), x['metrics'])
for x in enc_state['candidate_evaluations']]
failed_candidates = [to_cs(x) for x in enc_state['failed_candidates']]
pending_evaluations = [
PendingEvaluation(to_cs(x)) for x in enc_state['pending_evaluations']]
return TuningJobState(
hp_ranges=hp_ranges,
candidate_evaluations=candidate_evaluations,
failed_candidates=failed_candidates,
pending_evaluations=pending_evaluations)
def map_reward(const=1.0) -> MapReward:
"""
Factory for map_reward argument in GPMultiFidelitySearcher.
"""
def const_minus_x(x):
return const - x
return MapReward(forward=const_minus_x, reverse=const_minus_x)
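if __name__ == '__main__':
    # Minimal sanity sketch of the factory above: with const=1.0 a reward of
    # 0.9 maps to the criterion 1.0 - 0.9 = 0.1, and reverse() recovers the
    # original reward.
    _mr = map_reward(const=1.0)
    assert abs(_mr(0.9) - 0.1) < 1e-12
    assert abs(_mr.reverse(0.1) - 0.9) < 1e-12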
|
py | 7df85953a2c844ecaeb741d9732cbe75540729f9 | # -*- coding: utf-8 -*-
import os
from conans import tools, ConanFile, CMake
from conans import __version__ as conan_version
from conans.model.version import Version
from conans.errors import ConanInvalidConfiguration, NotFoundException, ConanException
available_versions = ["3.16.3", "3.16.2", "3.16.1", "3.16.0",
"3.15.5", "3.15.4", "3.15.3", "3.15.2", "3.15.1", "3.15.0",
"3.14.7", "3.14.6", "3.14.5", "3.14.4", "3.14.3", "3.14.2", "3.14.1", "3.14.0",
"3.13.4", "3.13.3", "3.13.2", "3.13.1", "3.13.0",
"3.12.4", "3.12.3", "3.12.2", "3.12.1", "3.12.0",
"3.11.4", "3.11.3", "3.11.2", "3.11.1", "3.11.0",
"3.10.3", "3.10.2", "3.10.1", "3.10.0",
"3.9.6", "3.9.5", "3.9.4", "3.9.3", "3.9.2", "3.9.1", "3.9.0",
"3.8.2", "3.8.1", "3.8.0",
"3.7.2", "3.7.1", "3.7.0",
"3.6.3", "3.6.2", "3.6.1", "3.6.0",
"3.5.2", "3.5.1", "3.5.0",
"3.4.3", "3.4.2", "3.4.1", "3.4.0",
"3.3.2", "3.3.1", "3.3.0",
"3.2.3", "3.2.2", "3.2.1", "3.2.0",
"3.1.3", "3.1.2", "3.1.1", "3.1.0",
"3.0.2", "3.0.1", "3.0.0",
"2.8.12"]
class CMakeInstallerConan(ConanFile):
name = "cmake_installer"
description = "creates cmake binaries package"
license = "BSD-3-clause"
url = "https://github.com/conan-community/conan-cmake-installer"
author = "Conan Community"
homepage = "https://github.com/Kitware/CMake"
topics = ("conan", "cmake", "build", "installer")
settings = "os_build", "arch_build", "compiler", "arch"
options = {"version": available_versions}
default_options = {"version": [v for v in available_versions if "-" not in v][0]}
exports = "LICENSE"
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _arch(self):
return self.settings.get_safe("arch_build") or self.settings.get_safe("arch")
@property
def _os(self):
return self.settings.get_safe("os_build") or self.settings.get_safe("os")
@property
def _cmake_version(self):
if "version" in self.options:
return str(self.options.version)
else:
return self.version
def _minor_version(self):
return ".".join(str(self._cmake_version).split(".")[:2])
def _get_filename(self):
os_id = {"Macos": "Darwin", "Windows": "win32"}.get(str(self._os),
str(self._os))
arch_id = {"x86": "i386"}.get(self._arch, self._arch) if self._os != "Windows" else "x86"
if self._os == "Linux" and self._cmake_version in ("2.8.12", "3.0.2") and \
self._arch == "x86_64":
arch_id = "i386"
if self._os == "Macos" and self._cmake_version == "2.8.12":
arch_id = "universal"
return "cmake-%s-%s-%s" % (self._cmake_version, os_id, arch_id)
def _get_filename_src(self):
return "cmake-%s" % self._cmake_version
def _build_from_source(self):
return os.path.exists(os.path.join(self.build_folder, self._source_subfolder, "configure"))
def config_options(self):
if self.version >= Version("2.8"): # Means CMake version
del self.options.version
def configure(self):
if self._os == "Macos" and self._arch == "x86":
raise ConanInvalidConfiguration("Not supported x86 for OSx")
def _download_source(self):
minor = self._minor_version()
ext = "tar.gz" if not self._os == "Windows" else "zip"
dest_file = "file.tgz" if self._os != "Windows" else "file.zip"
unzip_folder = self._get_filename()
def download_cmake(url, dest_file, unzip_folder):
self.output.info("Downloading: %s" % url)
tools.get(url, filename=dest_file, verify=False)
os.rename(unzip_folder, self._source_subfolder)
try:
url = "https://cmake.org/files/v%s/%s.%s" % (minor, self._get_filename(), ext)
download_cmake(url, dest_file, unzip_folder)
except NotFoundException:
if self.settings.get_safe("os_build") == "Windows":
raise ConanInvalidConfiguration("Building from sources under Windows is not supported")
url = "https://cmake.org/files/v%s/%s.%s" % (minor, self._get_filename_src(), ext)
unzip_folder = self._get_filename_src()
download_cmake(url, dest_file, unzip_folder)
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["CMAKE_BOOTSTRAP"] = False
cmake.configure(source_dir=self._source_subfolder)
return cmake
def build(self):
self._download_source()
if self._build_from_source():
self.settings.arch = self.settings.arch_build # workaround for cross-building to get the correct arch during the build
cmake = self._configure_cmake()
cmake.build()
def package_id(self):
self.info.include_build_settings()
if self.settings.os_build == "Windows":
del self.info.settings.arch_build # same build is used for x86 and x86_64
del self.info.settings.arch
del self.info.settings.compiler
def package(self):
if self._build_from_source():
self.copy("Copyright.txt", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
else:
if self._os == "Macos":
appname = "CMake.app" if self._cmake_version != "2.8.12" else "CMake 2.8-12.app"
self.copy("*", dst="", src=os.path.join(self._source_subfolder, appname, "Contents"))
else:
self.copy("*", dst="", src=self._source_subfolder)
self.copy("Copyright.txt", dst="licenses", src=os.path.join(self._source_subfolder, "doc", "cmake"))
def package_info(self):
if self.package_folder is not None:
minor = self._minor_version()
self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
self.env_info.CMAKE_ROOT = self.package_folder
mod_path = os.path.join(self.package_folder, "share", "cmake-%s" % minor, "Modules")
self.env_info.CMAKE_MODULE_PATH = mod_path
if not os.path.exists(mod_path):
raise ConanException("Module path not found: %s" % mod_path)
else:
self.output.warn("No package folder have been created.")
|
py | 7df859b7c48ef48c5fdd2e5e214142c5b2592f3f | def add_collation(conn):
conn.create_collation('BILL_FIRST', BILL_FIRST)
def BILL_FIRST(left, right):
if 'bill' in left and 'bill' in right:
if left == right == min(left, right):
return 0
elif left == min(left, right):
return -1
else:
return 1
if 'bill' in left and 'bill' not in right:
return -1
if 'bill' not in left and 'bill' in right:
return 1
if 'bill' not in left and 'bill' not in right:
if left == right == min(left, right):
return 0
elif left == min(left, right):
return -1
else:
            return 1
 |
py | 7df859d7a2a124ba27637fc872a3071b3cfec1c0 | # coding: utf-8
"""
Purity//FB REST Client
Client for Purity//FB REST API (1.0 - 1.2), developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SmbRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'enabled': 'bool',
'acl_mode': 'str'
}
attribute_map = {
'enabled': 'enabled',
'acl_mode': 'acl-mode'
}
def __init__(self, enabled=None, acl_mode=None):
"""
SmbRule - a model defined in Swagger
"""
self._enabled = None
self._acl_mode = None
if enabled is not None:
self.enabled = enabled
if acl_mode is not None:
self.acl_mode = acl_mode
@property
def enabled(self):
"""
Gets the enabled of this SmbRule.
is the protocol enabled? Default false when creating a new rule
:return: The enabled of this SmbRule.
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""
Sets the enabled of this SmbRule.
is the protocol enabled? Default false when creating a new rule
:param enabled: The enabled of this SmbRule.
:type: bool
"""
self._enabled = enabled
@property
def acl_mode(self):
"""
Gets the acl_mode of this SmbRule.
SMB ACL mode. Default shared when creating a new file system. Possible values are shared and native.
:return: The acl_mode of this SmbRule.
:rtype: str
"""
return self._acl_mode
@acl_mode.setter
def acl_mode(self, acl_mode):
"""
Sets the acl_mode of this SmbRule.
SMB ACL mode. Default shared when creating a new file system. Possible values are shared and native.
:param acl_mode: The acl_mode of this SmbRule.
:type: str
"""
self._acl_mode = acl_mode
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, SmbRule):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
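if __name__ == '__main__':
    # Usage sketch (illustrative values): build a rule and round-trip it
    # through to_dict(); per attribute_map it would serialize to the
    # 'enabled' and 'acl-mode' JSON keys.
    _rule = SmbRule(enabled=True, acl_mode='shared')
    assert _rule.to_dict() == {'enabled': True, 'acl_mode': 'shared'}
    print(_rule)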
|
py | 7df85a80709105842c7ace471bd0ef753376ca20 | import os, sys, argparse, subprocess, signal
# Project defaults
FLASK_APP = 'server/__init__.py'
DEFAULT_IP = '0.0.0.0:3334'
SPOTIPY_CLIENT_ID = "07babcfeaea747f0967bb1d5145fc21f"
SPOTIPY_CLIENT_SECRET = "45e1dbda55da450b808e50aaf728365b"
SPOTIPY_REDIRECT_URI = "https://spotify-stats.mybluemix.net/"
class Command:
    def __init__(self, name, descr, runcmd, env=None):
        self.name = name
        self.descr = descr
        self.runcmd = runcmd
        # Avoid a shared mutable default argument; copy so later updates
        # don't leak between Command instances.
        self.env = dict(env) if env else {}
self.env["SPOTIPY_CLIENT_ID"] = "07babcfeaea747f0967bb1d5145fc21f"
self.env["SPOTIPY_CLIENT_SECRET"] = "45e1dbda55da450b808e50aaf728365b"
self.env["SPOTIPY_REDIRECT_URI"] = "https://spotify-stats.mybluemix.net/"
def run(self, conf):
cmd = self.runcmd(conf)
env = os.environ
env.update(conf)
env.update(self.env)
subprocess.call(cmd, env=env, shell=True)
env["SPOTIPY_CLIENT_ID"] = "07babcfeaea747f0967bb1d5145fc21f"
env["SPOTIPY_CLIENT_SECRET"] = "45e1dbda55da450b808e50aaf728365b"
env["SPOTIPY_REDIRECT_URI"] = "https://spotify-stats.mybluemix.net/"
class CommandManager:
def __init__(self):
self.commands = {}
def add(self, command):
self.commands[command.name] = command
def configure(self, conf):
self.conf = conf
def run(self, command):
if command in self.commands:
self.commands[command].run(self.conf)
else:
print("invalid command specified\n")
print(self.availableCommands())
def availableCommands(self):
commands = sorted(self.commands.values(), key=lambda c: c.name)
space = max([len(c.name) for c in commands]) + 2
description = 'available subcommands:\n'
for c in commands:
description += ' ' + c.name + ' ' * (space - len(c.name)) + c.descr + '\n'
return description
cm = CommandManager()
cm.add(Command(
"build",
"compiles python files in project into .pyc binaries",
lambda c: 'python -m compileall .'))
cm.add(Command(
"start",
"runs server with gunicorn in a production setting",
lambda c: 'gunicorn -b {0}:{1} server:app'.format(c['host'], c['port']),
{
'FLASK_APP': FLASK_APP,
'FLASK_DEBUG': 'false'
}))
cm.add(Command(
"run",
"runs dev server using Flask's native debugger & backend reloader",
lambda c: 'python -m flask run --host={0} --port={1} --debugger --reload'.format(c['host'], c['port']),
{
'FLASK_APP': FLASK_APP,
'FLASK_DEBUG': 'true'
}))
cm.add(Command(
"livereload",
"runs dev server using livereload for dynamic webpage reloading",
lambda c: 'python -m flask run',
{
'FLASK_APP': FLASK_APP,
'FLASK_LIVE_RELOAD': 'true',
}))
cm.add(Command(
"debug",
"runs dev server in debug mode; use with an IDE's remote debugger",
lambda c: 'python -m flask run --host={0} --port={1} --no-debugger --no-reload'.format(c['host'], c['port']),
{
'FLASK_APP': FLASK_APP,
'FLASK_DEBUG': 'true'
}))
cm.add(Command(
"test",
"runs all tests inside of `tests` directory",
lambda c: 'python -m unittest discover -s tests -p "*.py"'))
# Create and format argument parser for CLI
parser = argparse.ArgumentParser(description=cm.availableCommands(),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("subcommand", help="subcommand to run (see list above)")
parser.add_argument("ipaddress", nargs='?', default=DEFAULT_IP,
help="address and port to run on (i.e. {0})".format(DEFAULT_IP))
def livereload_check():
check = subprocess.call("lsof -n -i4TCP:3334", shell=True)
if (check == 0):
output = subprocess.check_output("pgrep Python", shell=True)
pypid = int(output)
os.kill(pypid, signal.SIGKILL)
print("Discovered rogue Python process: {0}".format(pypid))
print("Killing PID {0}...".format(pypid))
else:
print(" No rogue Python process running")
# Take in command line input for configuration
try:
args = parser.parse_args()
cmd = args.subcommand
addr = args.ipaddress.split(':')
cm.configure({
'host': addr[0],
'port': addr[1],
})
cm.run(cmd)
except KeyboardInterrupt:
if 'FLASK_LIVE_RELOAD' in os.environ and os.environ['FLASK_LIVE_RELOAD'] == 'true':
livereload_check()
except:
if len(sys.argv) == 1:
print(cm.availableCommands())
sys.exit(0)
|
py | 7df85b35797bac4fea86691d9061bb8e8ece2dc0 | #!/usr/bin/env python3.6
import datetime
import discord
from discord.ext import commands
class Misc:
"""
Miscellaneous commands
"""
def __init__(self, bot):
self.bot = bot
print("{} addon loaded.".format(self.__class__.__name__))
@commands.command(pass_context=True)
async def ping(self, ctx):
"""Pong!"""
# https://github.com/appu1232/Discord-Selfbot/blob/master/cogs/misc.py#L595
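        # Note: .now() is a classmethod, so this returns the current time (not
        # the message timestamp); the difference taken below therefore measures
        # roughly the websocket ping round-trip.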
msgtime = ctx.message.created_at.now()
await (await self.bot.ws.ping())
now = datetime.datetime.now()
ping = now - msgtime
return await ctx.send(":ping_pong:! Response Time: {} ms".format(str(ping.microseconds / 1000.0)))
@commands.command(pass_context=True, aliases=['mc'])
async def membercount(self, ctx):
"""Prints current member count"""
return await ctx.send(str(self.bot.guild.name)+" currently has " + str(len(self.bot.guild.members)) + " members!")
@commands.command()
async def about(self, ctx):
"""About Link."""
return await ctx.send("View my source code here: https://github.com/T3CHNOLOG1C/Link")
def setup(bot):
bot.add_cog(Misc(bot))
|
py | 7df85b7c47945fed88e3fa6ff228e7e6b3da871b | # Copyright (c) 2018, 2019, Oracle and/or its affiliates.
# Copyright (c) 2013, Regents of the University of California
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
def test_add_long_overflow():
# max long value is written as long primitive
val = 0x7fffffffffffffff
assert val + 128 == 0x800000000000007f
def test_mul_long_overflow():
# max long value is written as long primitive
val = 0x7fffffffffffffff
assert val * 2 == 0xfffffffffffffffe
def test_add_bool():
assert True.__add__(True) == 2, "True.__add__(True)"
assert True.__add__(False) == 1, "True.__add__(False)"
assert False.__add__(True) == 1, "False.__add__(True)"
assert False.__add__(False) == 0, "False.__add__(False)"
assert False.__add__(0) == 0, "False.__add__(0)"
assert True.__add__(0) == 1, "True.__add__(0)"
assert False.__add__(1) == 1, "False.__add__(1)"
assert True.__add__(1) == 2, "True.__add__(1)"
assert False.__add__(0x7fffffff) == 0x7fffffff, "False.__add__(0x7fffffff)"
assert True.__add__(0x7fffffff) == 0x80000000, "True.__add__(0x7fffffff)"
assert False.__add__(0x7fffffffffffffff) == 0x7fffffffffffffff, "False.__add__(0x7fffffffffffffff)"
assert True.__add__(0x7fffffffffffffff) == 0x8000000000000000, "True.__add__(0x7fffffffffffffff)"
assert True.__add__(0.0) == NotImplemented, "True.__add__(0.0)"
def test_mul_bool():
assert False.__mul__(False) == 0, "False.__mul__(False)"
assert False.__mul__(True) == 0, "False.__mul__(True)"
assert False.__mul__(1) == 0, "False.__mul__(1)"
assert False.__mul__(0) == 0, "False.__mul__(0)"
assert True.__mul__(0) == 0, "True.__mul__(0)"
assert True.__mul__(1) == 1, "True.__mul__(1)"
assert True.__mul__(0x7fffffff) == 0x7fffffff, "True.__mul__(0x7fffffff)"
assert True.__mul__(0x7fffffffffffffff) == 0x7fffffffffffffff, "True.__mul__(0x7fffffffffffffff)"
assert True.__mul__(0.0) == NotImplemented, "True.__mul__(0.0)"
assert True * 1.0 == 1.0, "True * 1.0"
assert False * 1.0 == 0.0, "False * 0.0"
def expectError(callable):
try:
callable()
return False
except TypeError as e:
return True
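# For instance (illustrative), expectError(lambda: 1 + 'a') returns True,
# because adding an int and a str raises TypeError.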
def test_bin_comparison():
class A:
pass
class B:
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __lt__(self, other):
return True
def __le__(self, other):
return True
a = A()
b = B()
assert a > b, "Comparison 'a > b' failed"
assert a >= b, "Comparison 'a >= b' failed"
assert a < b, "Comparison 'a < b' failed"
assert a <= b, "Comparison 'a <= b' failed"
assert b > a, "Comparison 'b > a' failed"
assert b >= a, "Comparison 'b >= a' failed"
assert b < a, "Comparison 'b < a' failed"
assert b <= a, "Comparison 'b <= a' failed"
def test_bin_comparison_wo_eq_ne():
class A():
def __eq__(self, o):
return NotImplemented
assert A() != A()
a = A()
assert a == a
try:
assert a <= a
except TypeError:
pass
else:
assert False
def test_floor_div():
assert True // True == True
assert True // 2 == 0
assert True // 2.0 == 0.0
assert 0 // True == 0
assert 3 // 2 == 1
assert 15 // 5 == 3
assert 0 // 1 == 0
assert 15.5 // True == 15.0
assert 15.5 // 5 == 3.0
assert 16.5 // 5.5 == 3.0
assert 16.5 // 3.2 == 5.0
assert_exception(lambda: True // False, ZeroDivisionError)
assert_exception(lambda: True // 0, ZeroDivisionError)
assert_exception(lambda: True // 0.0, ZeroDivisionError)
assert_exception(lambda: 3 // False, ZeroDivisionError)
assert_exception(lambda: 3 // 0, ZeroDivisionError)
assert_exception(lambda: 3 // 0.0, ZeroDivisionError)
assert_exception(lambda: 3.0 // False, ZeroDivisionError)
assert_exception(lambda: 5.4 // 0, ZeroDivisionError)
assert_exception(lambda: 5.4 // 0.0, ZeroDivisionError)
def test_divmod():
class Floatable:
def __init__(self, val):
self.val = val
def __float__(self):
return self.val
def doDivmod(a, b):
return divmod(a, b)
argList = [(Floatable(3), Floatable(4)), (complex(1,2), complex(3,4))]
for args in argList:
assert_exception(lambda: doDivmod(*args), TypeError)
def test_subclass_ordered_binop():
class A(int):
def __add__(self, other):
return 0xa
class B(A):
def __add__(self, other):
return 0xb
class C(B):
__radd__ = B.__add__
assert A(1) + A(1) == 0xa
assert 1 + A(10) == 11
assert A(10) + 1 == 0xa
# TODO: we're doing this wrong right now
# assert A(1) + B(1) == 0xa
assert B(1) + A(2) == 0xb
assert A(1) + C(3) == 0xb
assert C(3) + A(1) == 0xb
def assert_exception(op, ex_type):
try:
op()
assert False, "expected exception %s" % ex_type
except BaseException as e:
if type(e) == AssertionError:
raise e
else:
assert type(e) == ex_type, "expected exception %s but got %s" % (ex_type, type(e))
|
py | 7df85bc5c662ed053401f7ccc9ef9fdba453291e | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# fetch values from package.xml
setup_args = generate_distutils_setup(
packages=['augmented_vr_module'],
package_dir={'': 'src'})
setup(**setup_args)
|
py | 7df85e4ba7abd9b9de1198a89c1751161040b082 | import re
import os
import glob
from maya import cmds
import pyblish.api
from pype.hosts.maya import lib
SHAPE_ATTRS = ["castsShadows",
"receiveShadows",
"motionBlur",
"primaryVisibility",
"smoothShading",
"visibleInReflections",
"visibleInRefractions",
"doubleSided",
"opposite"]
SHAPE_ATTRS = set(SHAPE_ATTRS)
def get_look_attrs(node):
"""Returns attributes of a node that are important for the look.
These are the "changed" attributes (those that have edits applied
in the current scene).
Returns:
list: Attribute names to extract
"""
# When referenced get only attributes that are "changed since file open"
# which includes any reference edits, otherwise take *all* user defined
# attributes
is_referenced = cmds.referenceQuery(node, isNodeReferenced=True)
result = cmds.listAttr(node, userDefined=True,
changedSinceFileOpen=is_referenced) or []
# `cbId` is added when a scene is saved, ignore by default
if "cbId" in result:
result.remove("cbId")
# For shapes allow render stat changes
if cmds.objectType(node, isAType="shape"):
attrs = cmds.listAttr(node, changedSinceFileOpen=True) or []
for attr in attrs:
if attr in SHAPE_ATTRS:
result.append(attr)
elif attr.startswith('ai'):
result.append(attr)
return result
def node_uses_image_sequence(node):
"""Return whether file node uses an image sequence or single image.
Determine if a node uses an image sequence or just a single image,
not always obvious from its file path alone.
Args:
node (str): Name of the Maya node
Returns:
bool: True if node uses an image sequence
"""
# useFrameExtension indicates an explicit image sequence
node_path = get_file_node_path(node).lower()
# The following tokens imply a sequence
patterns = ["<udim>", "<tile>", "<uvtile>", "u<u>_v<v>", "<frame0"]
return (cmds.getAttr('%s.useFrameExtension' % node) or
any(pattern in node_path for pattern in patterns))
def seq_to_glob(path):
"""Takes an image sequence path and returns it in glob format,
with the frame number replaced by a '*'.
Image sequences may be numerical sequences, e.g. /path/to/file.1001.exr
will return as /path/to/file.*.exr.
Image sequences may also use tokens to denote sequences, e.g.
/path/to/texture.<UDIM>.tif will return as /path/to/texture.*.tif.
Args:
path (str): the image sequence path
Returns:
str: Return glob string that matches the filename pattern.
"""
if path is None:
return path
# If any of the patterns, convert the pattern
patterns = {
"<udim>": "<udim>",
"<tile>": "<tile>",
"<uvtile>": "<uvtile>",
"#": "#",
"u<u>_v<v>": "<u>|<v>",
"<frame0": "<frame0\d+>",
"<f>": "<f>"
}
lower = path.lower()
has_pattern = False
for pattern, regex_pattern in patterns.items():
if pattern in lower:
path = re.sub(regex_pattern, "*", path, flags=re.IGNORECASE)
has_pattern = True
if has_pattern:
return path
base = os.path.basename(path)
matches = list(re.finditer(r'\d+', base))
if matches:
match = matches[-1]
new_base = '{0}*{1}'.format(base[:match.start()],
base[match.end():])
head = os.path.dirname(path)
return os.path.join(head, new_base)
else:
return path
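# Illustrative examples (hypothetical paths), matching the docstring above:
#   seq_to_glob('/path/to/file.1001.exr')      -> '/path/to/file.*.exr'
#   seq_to_glob('/path/to/texture.<UDIM>.tif') -> '/path/to/texture.*.tif'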
def get_file_node_path(node):
"""Get the file path used by a Maya file node.
Args:
node (str): Name of the Maya file node
Returns:
str: the file path in use
"""
# if the path appears to be sequence, use computedFileTextureNamePattern,
# this preserves the <> tag
if cmds.attributeQuery('computedFileTextureNamePattern',
node=node,
exists=True):
plug = '{0}.computedFileTextureNamePattern'.format(node)
texture_pattern = cmds.getAttr(plug)
patterns = ["<udim>",
"<tile>",
"u<u>_v<v>",
"<f>",
"<frame0",
"<uvtile>"]
lower = texture_pattern.lower()
if any(pattern in lower for pattern in patterns):
return texture_pattern
if cmds.nodeType(node) == 'aiImage':
return cmds.getAttr('{0}.filename'.format(node))
# otherwise use fileTextureName
return cmds.getAttr('{0}.fileTextureName'.format(node))
def get_file_node_files(node):
"""Return the file paths related to the file node
Note:
Will only return existing files. Returns an empty list
if not valid existing files are linked.
Returns:
list: List of full file paths.
"""
path = get_file_node_path(node)
path = cmds.workspace(expandName=path)
if node_uses_image_sequence(node):
glob_pattern = seq_to_glob(path)
return glob.glob(glob_pattern)
elif os.path.exists(path):
return [path]
else:
return []
class CollectLook(pyblish.api.InstancePlugin):
"""Collect look data for instance.
For the shapes/transforms of the referenced object to collect look for
retrieve the user-defined attributes (like V-ray attributes) and their
values as they were created in the current scene.
For the members of the instance collect the sets (shadingEngines and
other sets, e.g. VRayDisplacement) they are in along with the exact
membership relations.
Collects:
lookAttribtutes (list): Nodes in instance with their altered attributes
lookSetRelations (list): Sets and their memberships
lookSets (list): List of set names included in the look
"""
order = pyblish.api.CollectorOrder + 0.2
families = ["look"]
label = "Collect Look"
hosts = ["maya"]
maketx = True
def process(self, instance):
"""Collect the Look in the instance with the correct layer settings"""
with lib.renderlayer(instance.data["renderlayer"]):
self.collect(instance)
def collect(self, instance):
self.log.info("Looking for look associations "
"for %s" % instance.data['name'])
# Discover related object sets
self.log.info("Gathering sets..")
sets = self.collect_sets(instance)
# Lookup set (optimization)
instance_lookup = set(cmds.ls(instance, long=True))
self.log.info("Gathering set relations..")
# Ensure iteration happen in a list so we can remove keys from the
# dict within the loop
for objset in list(sets):
self.log.debug("From %s.." % objset)
# Get all nodes of the current objectSet (shadingEngine)
for member in cmds.ls(cmds.sets(objset, query=True), long=True):
member_data = self.collect_member_data(member,
instance_lookup)
if not member_data:
continue
# Add information of the node to the members list
sets[objset]["members"].append(member_data)
# Remove sets that didn't have any members assigned in the end
# Thus the data will be limited to only what we need.
self.log.info("objset {}".format(sets[objset]))
if not sets[objset]["members"] or (not objset.endswith("SG")):
self.log.info("Removing redundant set information: "
"%s" % objset)
sets.pop(objset, None)
self.log.info("Gathering attribute changes to instance members..")
attributes = self.collect_attributes_changed(instance)
# Store data on the instance
instance.data["lookData"] = {"attributes": attributes,
"relationships": sets}
# Collect file nodes used by shading engines (if we have any)
files = list()
looksets = sets.keys()
shaderAttrs = [
"surfaceShader",
"volumeShader",
"displacementShader",
"aiSurfaceShader",
"aiVolumeShader"]
materials = list()
if looksets:
for look in looksets:
for at in shaderAttrs:
try:
con = cmds.listConnections("{}.{}".format(look, at))
except ValueError:
# skip attributes that are invalid in current
# context. For example in the case where
# Arnold is not enabled.
continue
if con:
materials.extend(con)
self.log.info("Found materials:\n{}".format(materials))
self.log.info("Found the following sets:\n{}".format(looksets))
# Get the entire node chain of the look sets
# history = cmds.listHistory(looksets)
history = list()
for material in materials:
history.extend(cmds.listHistory(material))
files = cmds.ls(history, type="file", long=True)
files.extend(cmds.ls(history, type="aiImage", long=True))
self.log.info("Collected file nodes:\n{}".format(files))
# Collect textures if any file nodes are found
instance.data["resources"] = []
for n in files:
instance.data["resources"].append(self.collect_resource(n))
self.log.info("Collected resources: {}".format(instance.data["resources"]))
# Log a warning when no relevant sets were retrieved for the look.
if not instance.data["lookData"]["relationships"]:
self.log.warning("No sets found for the nodes in the instance: "
"%s" % instance[:])
# Ensure unique shader sets
# Add shader sets to the instance for unify ID validation
instance.extend(shader for shader in looksets if shader
not in instance_lookup)
self.log.info("Collected look for %s" % instance)
def collect_sets(self, instance):
"""Collect all objectSets which are of importance for publishing
It checks if all nodes in the instance are related to any objectSet
which need to be
Args:
instance (list): all nodes to be published
Returns:
dict
"""
sets = dict()
for node in instance:
related_sets = lib.get_related_sets(node)
if not related_sets:
continue
for objset in related_sets:
if objset in sets:
continue
sets[objset] = {"uuid": lib.get_id(objset), "members": list()}
return sets
def collect_member_data(self, member, instance_members):
"""Get all information of the node
Args:
member (str): the name of the node to check
instance_members (set): the collected instance members
Returns:
dict
"""
node, components = (member.rsplit(".", 1) + [None])[:2]
# Only include valid members of the instance
if node not in instance_members:
return
node_id = lib.get_id(node)
if not node_id:
self.log.error("Member '{}' has no attribute 'cbId'".format(node))
return
member_data = {"name": node, "uuid": node_id}
if components:
member_data["components"] = components
return member_data
def collect_attributes_changed(self, instance):
"""Collect all userDefined attributes which have changed
Each node gets checked for user defined attributes which have been
altered during development. Each changes gets logged in a dictionary
[{name: node,
uuid: uuid,
attributes: {attribute: value}}]
Args:
instance (list): all nodes which will be published
Returns:
list
"""
attributes = []
for node in instance:
# Collect changes to "custom" attributes
node_attrs = get_look_attrs(node)
self.log.info(
"Node \"{0}\" attributes: {1}".format(node, node_attrs)
)
# Only include if there are any properties we care about
if not node_attrs:
continue
node_attributes = {}
for attr in node_attrs:
if not cmds.attributeQuery(attr, node=node, exists=True):
continue
attribute = "{}.{}".format(node, attr)
node_attributes[attr] = cmds.getAttr(attribute)
attributes.append({"name": node,
"uuid": lib.get_id(node),
"attributes": node_attributes})
return attributes
def collect_resource(self, node):
"""Collect the link to the file(s) used (resource)
Args:
node (str): name of the node
Returns:
dict
"""
self.log.debug("processing: {}".format(node))
if cmds.nodeType(node) == 'file':
self.log.debug(" - file node")
attribute = "{}.fileTextureName".format(node)
computed_attribute = "{}.computedFileTextureNamePattern".format(node)
elif cmds.nodeType(node) == 'aiImage':
self.log.debug("aiImage node")
attribute = "{}.filename".format(node)
computed_attribute = attribute
source = cmds.getAttr(attribute)
self.log.info(" - file source: {}".format(source))
color_space_attr = "{}.colorSpace".format(node)
color_space = cmds.getAttr(color_space_attr)
# Compare with the computed file path, e.g. the one with the <UDIM>
# pattern in it, to generate some logging information about this
# difference
# computed_attribute = "{}.computedFileTextureNamePattern".format(node)
computed_source = cmds.getAttr(computed_attribute)
if source != computed_source:
self.log.debug("Detected computed file pattern difference "
"from original pattern: {0} "
"({1} -> {2})".format(node,
source,
computed_source))
# We replace backslashes with forward slashes because V-Ray
# can't handle the UDIM files with the backslashes in the
# paths as the computed patterns
source = source.replace("\\", "/")
files = get_file_node_files(node)
if len(files) == 0:
self.log.error("No valid files found from node `%s`" % node)
self.log.info("collection of resource done:")
self.log.info(" - node: {}".format(node))
self.log.info(" - attribute: {}".format(attribute))
self.log.info(" - source: {}".format(source))
self.log.info(" - file: {}".format(files))
self.log.info(" - color space: {}".format(color_space))
# Define the resource
return {"node": node,
"attribute": attribute,
"source": source, # required for resources
"files": files,
"color_space": color_space} # required for resources
|
py | 7df85e4d5b76e4c94237bff4b553241ec938b3b1 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for stompd node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import re
import subprocess
import time
from .authproxy import JSONRPCException
from .util import (
assert_equal,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 600
class TestNode():
"""A class for representing a stompd node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir, use_cli=False):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 600 seconds for the RPC server to respond
self.rpc_timeout = 600
if binary is None:
self.binary = os.getenv("BITCOIND", "stompd")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
# Most callers will just need to add extra args to the standard list below. For those callers that need more flexibility, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "stomp-cli"), self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print("Cleaning up leftover process")
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return getattr(self.rpc, name)
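# Usage sketch (illustrative only): because of __getattr__ above, RPC or CLI methods
# can be called directly on the node object, e.g.
#   node.getblockcount()  # forwarded to self.rpc.getblockcount(), or to the CLI
#                         # wrapper when use_cli=True
# Only names not already defined on TestNode are dispatched this way.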
def start(self, extra_args=None, stderr=None, *args, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr, *args, **kwargs)
self.running = True
self.log.debug("stompd started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the stompd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
time.sleep(5)
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "stompd exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
while self.rpc.getblockcount() < 0:
time.sleep(1)
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to stompd")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
time.sleep(20)
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes stompd to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *args, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(*args, **kwargs)
self.p2ps.append(p2p_conn)
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, "No p2p connection"
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to stomp-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with stomp-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
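# Usage sketch (illustrative only): batch() expects zero-argument callables such as
# the ones TestNodeCLIAttr.get_request() produces, e.g.
#   requests = [node.cli.getblockcount.get_request(),
#               node.cli.getbestblockhash.get_request()]
#   results = node.cli.batch(requests)  # -> list of {"result": ...} / {"error": ...}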
def send_cli(self, command=None, *args, **kwargs):
"""Run stomp-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same stomp-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running bitcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
|
py | 7df85eaf1c2028216789a9d0b8de5ada6b34df4c | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-2d/tmp_files/8677.c')
procedure('kernel_jacobi_1d')
loop(0)
known(' n > 2 ')
tile(0,2,16,2)
tile(0,4,16,4)
tile(1,2,16,2)
tile(1,4,16,4)
|
py | 7df85ed1b1f6ce99c2d5df71097b731dee68912f | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import base64
import os
import socket
import urllib2
from contextlib import closing
import time
from ambari_commons.exceptions import FatalException, NonFatalException
from ambari_commons.logging_utils import get_verbose, print_info_msg, get_debug_mode
from ambari_commons.os_check import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import run_os_command
# simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
import ambari_simplejson as json
from ambari_server.resourceFilesKeeper import ResourceFilesKeeper, KeeperException
from ambari_server.serverConfiguration import configDefaults, PID_NAME, get_resources_location, \
get_stack_location, CLIENT_API_PORT, CLIENT_API_PORT_PROPERTY, \
SSL_API, DEFAULT_SSL_API_PORT, SSL_API_PORT
from ambari_server.userInput import get_validated_string_input
# Ambari server API properties
SERVER_API_HOST = '127.0.0.1'
SERVER_API_PROTOCOL = 'http'
SERVER_API_SSL_PROTOCOL = 'https'
@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def is_server_runing():
pid_file_path = os.path.join(configDefaults.PID_DIR, PID_NAME)
if os.path.exists(pid_file_path):
try:
f = open(pid_file_path, "r")
except IOError, ex:
raise FatalException(1, str(ex))
pid = f.readline().strip()
if not pid.isdigit():
err = "'%s' is incorrect PID value. %s is corrupt. Removing" % (pid, pid_file_path)
f.close()
run_os_command("rm -f " + pid_file_path)
raise NonFatalException(err)
f.close()
retcode, out, err = run_os_command("ps -p " + pid)
if retcode == 0:
return True, int(pid)
else:
return False, None
else:
return False, None
def wait_for_server_to_stop(wait_timeout):
start_time = time.time()
is_timeout = lambda: time.time() - start_time > wait_timeout
while is_server_runing()[0] and not is_timeout():
time.sleep(0.1)
return not is_timeout()
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def is_server_runing():
from ambari_commons.os_windows import SERVICE_STATUS_STARTING, SERVICE_STATUS_RUNNING, \
SERVICE_STATUS_STOPPING, \
SERVICE_STATUS_STOPPED, SERVICE_STATUS_NOT_INSTALLED
from ambari_windows_service import AmbariServerService
statusStr = AmbariServerService.QueryStatus()
if statusStr in (SERVICE_STATUS_STARTING, SERVICE_STATUS_RUNNING, SERVICE_STATUS_STOPPING):
return True, ""
elif statusStr == SERVICE_STATUS_STOPPED:
return False, SERVICE_STATUS_STOPPED
elif statusStr == SERVICE_STATUS_NOT_INSTALLED:
return False, SERVICE_STATUS_NOT_INSTALLED
else:
return False, None
#
# Performs HDP stack housekeeping
#
def refresh_stack_hash(properties):
resources_location = get_resources_location(properties)
stacks_location = get_stack_location(properties)
resource_files_keeper = ResourceFilesKeeper(resources_location, stacks_location)
try:
print "Organizing resource files at {0}...".format(resources_location,
verbose=get_verbose())
resource_files_keeper.perform_housekeeping()
except KeeperException, ex:
msg = "Can not organize resource files at {0}: {1}".format(
resources_location, str(ex))
raise FatalException(-1, msg)
#
# Builds ambari-server API base url
# Reads server protocol/port from configuration
# And returns something like
# http://127.0.0.1:8080/api/v1/
# or if using ssl https://hostname.domain:8443/api/v1
#
def get_ambari_server_api_base(properties):
api_host = SERVER_API_HOST
api_protocol = SERVER_API_PROTOCOL
api_port = CLIENT_API_PORT
api_port_prop = properties.get_property(CLIENT_API_PORT_PROPERTY)
if api_port_prop is not None and api_port_prop != '':
api_port = api_port_prop
api_ssl = False
api_ssl_prop = properties.get_property(SSL_API)
if api_ssl_prop is not None:
api_ssl = api_ssl_prop.lower() == "true"
if api_ssl:
api_host = socket.getfqdn()
api_protocol = SERVER_API_SSL_PROTOCOL
api_port = DEFAULT_SSL_API_PORT
api_port_prop = properties.get_property(SSL_API_PORT)
if api_port_prop is not None:
api_port = api_port_prop
return '{0}://{1}:{2!s}/api/v1/'.format(api_protocol, api_host, api_port)
def get_ambari_admin_username_password_pair(options):
"""
Returns the Ambari administrator credential.
If not supplied via command line options, the user is queried for the username and password.
:param options: the collected command line options
:return: the Ambari admin credentials
"""
admin_login = options.ambari_admin_username \
if hasattr(options, 'ambari_admin_username') and options.ambari_admin_username is not None \
else get_validated_string_input("Enter Ambari Admin login: ", None, None, None, False, False)
admin_password = options.ambari_admin_password \
if hasattr(options, 'ambari_admin_password') and options.ambari_admin_password is not None \
else get_validated_string_input("Enter Ambari Admin password: ", None, None, None, True, False)
return admin_login, admin_password
def get_cluster_name(properties, admin_login, admin_password):
"""
Fetches the name of the first cluster (in case there are more)
from the response of host:port/api/v1/clusters call
"""
print_info_msg('Fetching cluster name')
cluster_name = None
response_code, json_data = get_json_via_rest_api(properties, admin_login, admin_password,
"clusters")
if json_data and 'items' in json_data:
items = json_data['items']
if len(items) > 0:
cluster_name = items[0]['Clusters']['cluster_name']
print_info_msg('Found cluster name: %s' % cluster_name)
return cluster_name
def get_json_via_rest_api(properties, admin_login, admin_password, entry_point):
"""
Fetches the data from a given REST API entry point
:param properties: the properties from the ambari.properties file
:param admin_login: an administrator's username used to log in to Ambari
:param admin_password: an administrator's password used to log in to Ambari
:param entry_point: the relative entry point to query (the base URL will be generated using the ambari.properties data)
:return: HTTP status, JSON data
"""
url = get_ambari_server_api_base(properties) + entry_point
admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
request.get_method = lambda: 'GET'
print_info_msg("Fetching information from Ambari's REST API")
with closing(urllib2.urlopen(request)) as response:
response_status_code = response.getcode()
json_data = None
print_info_msg(
"Received HTTP %s while fetching information from Ambari's REST API" % response_status_code)
if response_status_code == 200:
json_data = json.loads(response.read())
if (get_debug_mode()):
print_info_msg("Received JSON:\n" + json_data)
return response_status_code, json_data
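# Usage sketch (illustrative only; the URL assumes a default ambari.properties):
#   status, data = get_json_via_rest_api(properties, 'admin', 'admin', 'clusters')
# issues a GET against http://127.0.0.1:8080/api/v1/clusters and returns the
# (HTTP status, parsed JSON body) pair; the body is only populated on HTTP 200.
# See get_cluster_name above for a real caller.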
def perform_changes_via_rest_api(properties, admin_login, admin_password, url_postfix, get_method,
request_data=None):
url = get_ambari_server_api_base(properties) + url_postfix
admin_auth = base64.encodestring('%s:%s' % (admin_login, admin_password)).replace('\n', '')
request = urllib2.Request(url)
request.add_header('Authorization', 'Basic %s' % admin_auth)
request.add_header('X-Requested-By', 'ambari')
if request_data is not None:
request.add_data(json.dumps(request_data))
request.get_method = lambda: get_method
with closing(urllib2.urlopen(request)) as response:
response_status_code = response.getcode()
if response_status_code not in (200, 201):
err = 'Error while performing changes via Ambari REST API. Http status code - ' + str(
response_status_code)
raise FatalException(1, err)
|
py | 7df860c8160e6ace95a73c683ad71fee59ba041a | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
class TestResources(GaiaTestCase):
filename = 'IMG_0001.jpg'
destination = 'DCIM/100MZLLA'
def test_push_resource(self):
self.push_resource('IMG_0001.jpg', destination=self.destination)
# A fully qualified path is returned from the api
remote_filepath = '/'.join(['/sdcard', self.destination, self.filename])
self.assertTrue(remote_filepath in self.data_layer.media_files)
def test_push_multiple_resources(self):
count = 5
self.push_resource(self.filename, count, destination=self.destination)
for i in range(1, count + 1):
remote_filename = '_%s.'.join(iter(self.filename.split('.'))) % i
# A fully qualified path is returned from the api
remote_filepath = '/'.join(['/sdcard', self.destination, remote_filename])
self.assertTrue(remote_filepath in self.data_layer.media_files)
|
py | 7df8622e34cb8b5f81944b59bd1911f2aafc185d | import json
from flask_restplus import Namespace, Resource, fields
from flask import request
from core import ClasseExemplo
api = Namespace(
'Exemplo',
description='This is an example API'
)
# JSON model to be followed
exemplo_modelo = api.model(
'Exemplo',
{
'nome': fields.String(
required=True,
description='Name of the person'
)
}
)
@api.route('/consulta')
class FazerConsulta(Resource):
@api.response(200, '{"status": "Ok"}')
@api.response(500, '{"erro": "Tente novamente mais tarde"}')
def get(self):
'''Specific explanation for this endpoint.
'''
try:
return dict(status='Ok'), 200
except Exception as e:
return dict(
erro='Tente novamente mais tarde',
mensagem=e
), 500
@api.route('/consulta/<string:nome>')
class FazerConsultaComParam(Resource):
@api.param('nome', "The person's name")
@api.response(200, '{"status": "Ip público"}')
@api.response(400, '{"erro": "Não conseguimos nos conectar"}')
def get(self, nome):
'''Specific explanation for this endpoint.
'''
try:
controlador = ClasseExemplo()
response, status = controlador.consulta_com_nome()
if status:
return dict(status=response), 200
return dict(erro=response)
except Exception as e:
return dict(
erro='Tente novamente mais tarde',
mensagem=e
), 500
@api.route('/enviar_dados')
class EnviarDados(Resource):
@api.expect(exemplo_modelo)
@api.response(200, '{"status": "Ok"}')
@api.response(500, '{"erro": "Tente novamente mais tarde"}')
@api.response(400, '{"erro": "requisição inválida"}')
def post(self):
try:
if request.is_json and len(request.data) > 0:
req = request.get_json()
print(req)
return dict(status='Ok'), 200
return dict(erro='requisição inválida'), 400
except Exception as e:
return dict(
erro='Tente novamente mais tarde',
mensagem=e
), 500
|
py | 7df8637e32f47c3e5a73f152161cc4e8bb3b1aaf | from flask_wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField,ValidationError
from wtforms.validators import DataRequired, Length, Email,Regexp,EqualTo
from ..models import User
class LoginForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class RegistrationForm(Form):
email = StringField('Email', validators=[DataRequired(),Length(1,64),Email()])
username = StringField('Username', validators=[DataRequired(),Length(1,64),
Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,'Usernames must have only letters, '
'numbers, dots or underscores')])
password = PasswordField('Password', validators=[DataRequired(), EqualTo('password2',
message='Password must match')])
password2 = PasswordField('Confirm password', validators=[DataRequired()])
submit = SubmitField('Register')
def validate_email(self,field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self,field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ChangePasswordForm(Form):
old_password = PasswordField('Old password', validators=[DataRequired()])
password = PasswordField('New password', validators=[
DataRequired(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm new password', validators=[DataRequired()])
submit = SubmitField('Update Password')
class PasswordResetRequestForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
submit = SubmitField('Reset Password')
class PasswordResetForm(Form):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('New Password', validators=[
DataRequired(), EqualTo('password2', message='Passwords must match')])
password2 = PasswordField('Confirm password', validators=[DataRequired()])
submit = SubmitField('Reset Password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class ChangeEmailForm(Form):
email = StringField('New Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Update Email Address')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.') |
py | 7df86567b944ca6872ca7cce5b1f0df3f6d50413 | from typing import TYPE_CHECKING, Dict, List, Optional, Union
from modules.base import AWSIamAssembler, AWSK8sModuleProcessor, K8sServiceModuleProcessor
from modules.linker_helper import LinkerHelper
from opta.core.kubernetes import (
create_namespace_if_not_exists,
get_manual_secrets,
list_namespaces,
)
from opta.exceptions import UserErrors
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
class AwsK8sServiceProcessor(
AWSK8sModuleProcessor, K8sServiceModuleProcessor, AWSIamAssembler
):
FLAG_MULTIPLE_PORTS_SUPPORTED = True
def __init__(self, module: "Module", layer: "Layer"):
if (module.aliased_type or module.type) != "aws-k8s-service":
raise Exception(
f"The module {module.name} was expected to be of type aws k8s service"
)
super(AwsK8sServiceProcessor, self).__init__(module, layer)
def pre_hook(self, module_idx: int) -> None:
list_namespaces()
create_namespace_if_not_exists(self.layer.name)
manual_secrets = get_manual_secrets(self.layer.name)
for secret_name in self.module.data.get("secrets", []):
if secret_name not in manual_secrets:
raise UserErrors(
f"Secret {secret_name} has not been set via opta secret update! Please do so before applying the "
f"K8s service w/ a new secret."
)
super(AwsK8sServiceProcessor, self).pre_hook(module_idx)
def post_hook(self, module_idx: int, exception: Optional[Exception]) -> None:
self._extra_ports_controller()
super().post_hook(module_idx, exception)
def process(self, module_idx: int) -> None:
# Update the secrets
self.module.data["manual_secrets"] = self.module.data.get("secrets", [])
self.module.data["link_secrets"] = self.module.data.get("link_secrets", [])
current_envars: Union[List, Dict[str, str]] = self.module.data.get("env_vars", [])
if isinstance(current_envars, dict):
self.module.data["env_vars"] = [
{"name": x, "value": y} for x, y in current_envars.items()
]
# Handle links
for link_data in self.module.data.get("links", []):
if type(link_data) is str:
target_module_name = link_data
link_permissions = []
elif type(link_data) is dict:
target_module_name = list(link_data.keys())[0]
link_permissions = list(link_data.values())[0]
else:
raise UserErrors(
f"Link data {link_data} must be a string or map holding the permissions"
)
module = self.layer.get_module(target_module_name, module_idx)
if module is None:
raise Exception(
f"Did not find the desired module {target_module_name} "
"make sure that the module you're referencing is listed before the k8s "
"app one"
)
module_type = module.aliased_type or module.type
if module_type == "aws-postgres":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["db_user", "db_name", "db_password", "db_host"],
)
elif module_type == "aws-mysql":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["db_user", "db_name", "db_password", "db_host"],
)
elif module_type == "aws-redis":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["cache_host", "cache_auth_token"],
)
elif module_type == "aws-documentdb":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["db_user", "db_host", "db_password"],
)
elif module_type == "aws-s3":
self.handle_s3_link(module, link_permissions)
elif module_type == "aws-sqs":
self.handle_sqs_link(module, link_permissions)
elif module_type == "aws-sns":
self.handle_sns_link(module, link_permissions)
elif module_type == "aws-dynamodb":
self.handle_dynamodb_link(module, link_permissions)
elif module_type == "mongodb-atlas":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=[
"db_password",
"db_user",
"mongodb_atlas_connection_string",
],
)
else:
raise Exception(
f"Unsupported module type for k8s service link: {module_type}"
)
iam_statements = [
{
"Sid": "DescribeCluster",
"Action": ["eks:DescribeCluster"],
"Effect": "Allow",
"Resource": ["*"],
}
]
iam_statements += self.prepare_iam_statements()
self.module.data["iam_policy"] = {
"Version": "2012-10-17",
"Statement": iam_statements,
}
if "image_tag" in self.layer.variables:
self.module.data["tag"] = self.layer.variables["image_tag"]
if "image_digest" in self.layer.variables:
self.module.data["digest"] = self.layer.variables["image_digest"]
seen = set()
self.module.data["link_secrets"] = [
seen.add(obj["name"]) or obj # type: ignore
for obj in self.module.data["link_secrets"]
if obj["name"] not in seen
]
super(AwsK8sServiceProcessor, self).process(module_idx)
|
py | 7df865abc6e3ee1e3b150141862875df0924d359 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
from app.models import User
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
password2 = PasswordField(
'Repeat Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Register')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError('Please use a different email address.')
|
py | 7df865f39c8db3800b98c14d5b3860a757c224da | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Tooling - GS_BASEBACKUP
Case Name : The backup user's backup privilege is revoked while the backup is in progress
Description :
1. Start the backup: gs_basebackup -D /usr2/chenchen/basebackup/bak_a -Fp -Xstream
-p 18333 -h ip -c fast -l gauss_30.bak -P -v -U sysadmin -w
2. Stop the database before the backup finishes
Expect :
1. The backup fails and terminates abnormally.
2. The backup directory cannot be used to restore the database
History :
"""
import os
import time
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.ComThread import ComThread
class GsBaseBackUpCase33(unittest.TestCase):
def setUp(self):
self.Primary_User_Node = Node('PrimaryDbUser')
self.Primary_Root_Node = Node('PrimaryRoot')
self.LOG = Logger()
self.Constant = Constant()
self.Primary_SH = CommonSH('PrimaryDbUser')
self.U_NAME = 'u_basebackup_33'
self.gs_basebackup_bak_path = os.path.join(macro.DB_BACKUP_PATH,
'gs_basebackup')
self.gs_basebackup_bak_name = 'gs_basebackup_Case0033.bak'
self.Gs_Basebackup_Expect_Msg = 'gs_basebackup: could not connect ' \
'to server: FATAL: Normal user is not allowed to use HA channel!'
self.LOG.info(
'----Opengauss_Function_Tools_gs_basebackup_Case0033 start----')
def test_server_tools(self):
self.LOG.info('----Create the backup directory----')
is_dir_exists_cmd = f'''if [ ! -d "{self.gs_basebackup_bak_path}" ]
then
mkdir -p {self.gs_basebackup_bak_path}
fi'''
result = self.Primary_User_Node.sh(is_dir_exists_cmd).result()
self.LOG.info(result)
self.assertEqual(result, '')
self.LOG.info('----Set backup directory permissions to 700 to avoid permission errors----')
chmod_cmd = f"chmod 700 -R {self.gs_basebackup_bak_path}"
self.LOG.info(chmod_cmd)
chmod_msg = self.Primary_Root_Node.sh(chmod_cmd).result()
self.LOG.info(chmod_msg)
self.assertEqual(chmod_msg, '')
self.LOG.info('----List the backup directory----')
ls_cmd = f"ls -l {os.path.dirname(self.gs_basebackup_bak_path)}"
self.LOG.info(ls_cmd)
ls_msg = self.Primary_User_Node.sh(ls_cmd).result()
self.LOG.info(ls_msg)
self.LOG.info('----Create the user and grant privileges----')
sql_cmd = f"create user {self.U_NAME} " \
f"with password '{macro.COMMON_PASSWD}'; " \
f"alter user {self.U_NAME} with createrole; " \
f"alter user {self.U_NAME} with createdb; " \
f"alter user {self.U_NAME} with replication;"
self.LOG.info(sql_cmd)
result = self.Primary_SH.execut_db_sql(sql_cmd)
self.LOG.info(result)
self.assertIn(self.Constant.CREATE_ROLE_SUCCESS_MSG, result)
self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, result)
self.LOG.info('----Run the backup----')
gs_basebackup_cmd = f"gs_basebackup " \
f"-D {self.gs_basebackup_bak_path} " \
f"-Fp " \
f"-Xstream " \
f"-p {self.Primary_User_Node.db_port} " \
f"-c fast " \
f"-l {self.gs_basebackup_bak_name} " \
f"-P " \
f"-v " \
f"-U {self.U_NAME} " \
f"-w"
backup_cmd = f"source {macro.DB_ENV_PATH}; {gs_basebackup_cmd}"
self.LOG.info(backup_cmd)
gs_basebackup_thread = ComThread(self.Primary_User_Node.sh,
args=(backup_cmd,))
gs_basebackup_thread.setDaemon(True)
gs_basebackup_thread.start()
time.sleep(2)
self.LOG.info("----回收用户权限----")
sql_cmd = f"alter user {self.U_NAME} with noreplication;"
self.LOG.info(sql_cmd)
result = self.Primary_SH.execut_db_sql(sql_cmd)
self.LOG.info(result)
self.assertIn(self.Constant.ALTER_ROLE_SUCCESS_MSG, result)
self.LOG.info("----获取gs_basebackup执行结果----")
gs_basebackup_thread.join(300)
gs_basebackup_result = gs_basebackup_thread.get_result()
self.LOG.info(gs_basebackup_result.result())
self.assertIn(self.Gs_Basebackup_Expect_Msg,
gs_basebackup_result.result())
def tearDown(self):
self.LOG.info('----Delete the backup files----')
rm_cmd = f'''rm -rf {self.gs_basebackup_bak_path}'''
primary_result = self.Primary_User_Node.sh(rm_cmd).result()
self.LOG.info(primary_result)
self.LOG.info("----删除用户----")
sql_cmd = f"drop user if exists {self.U_NAME}"
self.LOG.info(sql_cmd)
result = self.Primary_SH.execut_db_sql(sql_cmd)
self.LOG.info(result)
self.LOG.info(
'----Opengauss_Function_Tools_gs_basebackup_Case0033 end----')
|
py | 7df8666f83616edb6fcfb07729acd11d41ae0681 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-12-08 09:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('konfera', '0018_auto_20161203_2305'),
]
operations = [
migrations.AddField(
model_name='talk',
name='language',
field=models.CharField(choices=[('SK', 'Slovak'), ('CZ', 'Czech'), ('EN', 'English')], default='EN', max_length=2),
),
]
|
py | 7df8668ee9450b3490ffaad18f753a59335d3973 | from enum import Enum
from typing import Optional
import archspec.cpu
from pydantic import BaseModel as PydanticBaseModel
from pydantic import Field, validator
from pydantic.types import PositiveInt
# Use the CPU families from archspec as the base CPU architecture
CPUArch = Enum( # type: ignore # Ref: python/mypy#529, #535 and #5317
"CPUArch",
{name: name for name, march in archspec.cpu.TARGETS.items() if not march.parents},
type=str,
)
# For the Microarchitecture, use the non-generic, non-family entries
CPUMicroArch = Enum( # type: ignore # Ref: python/mypy#529, #535 and #5317
"CPUMicroArch",
{
name: name
for name, march in archspec.cpu.TARGETS.items()
if march.parents and march.vendor != "generic"
},
type=str,
)
class CPU(PydanticBaseModel):
arch: CPUArch = Field(..., description="CPU architecture")
# Overlap of arch with microarch is intended since Docker classifies compatibility
# into broader categories rather than microarchitectures.
microarch: Optional[CPUMicroArch] = Field(
description="Microarchitecture, must match the architecture if specified"
)
ncores: PositiveInt = Field(..., description="Number of cores per CPU")
nthreads: PositiveInt = Field(1, description="Number of threads (per core)")
@validator("microarch")
def microarch_matches_arch(cls, v, values, **kwargs): # noqa: B902
if not (archspec.cpu.TARGETS[values["arch"]] < archspec.cpu.TARGETS[v]):
raise ValueError(
f"invalid microarchitecture '{v}' for architecture '{values['arch']}'"
)
return v
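# Usage sketch (illustrative only; assumes the installed archspec release defines the
# "x86_64" family and the "skylake" microarchitecture):
#   CPU(arch="x86_64", microarch="skylake", ncores=8, nthreads=2)  # passes validation
#   CPU(arch="aarch64", microarch="skylake", ncores=4)             # rejected by the
#   microarch_matches_arch validator, since "skylake" is not in the "aarch64" family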
|
py | 7df867f464619c23136b4e9442b892db3ebee238 | from sqlalchemy import *
from base import Base
class SampleMediumCV(Base):
__tablename__ = 'SampleMediumCV'
term = Column('Term', String(255), primary_key=True)
definition = Column('Definition', String(255))
def __repr__(self):
return "<SampleMedium('%s', '%s')>" % (self.term, self.definition)
|
py | 7df86858e5fe3e1113e0b862525f762ac986a2cf | #!/usr/bin/env python3
# Copyright 2022 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
from doctest import SKIP
import os
import re
import subprocess
import sys
import tempfile
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
vendors = collections.defaultdict(list)
scores = collections.defaultdict(int)
avoidness = collections.defaultdict(int)
consumes = {}
no_update = set()
buildozer_commands = []
needs_codegen_base_src = set()
original_deps = {}
original_external_deps = {}
# TODO(ctiller): ideally we wouldn't hardcode a bunch of paths here.
# We can likely parse out BUILD files from dependencies to generate this index.
EXTERNAL_DEPS = {
'absl/base/attributes.h':
'absl/base:core_headers',
'absl/base/call_once.h':
'absl/base',
# TODO(ctiller) remove this
'absl/base/internal/endian.h':
'absl-base',
'absl/base/thread_annotations.h':
'absl/base:core_headers',
'absl/container/flat_hash_map.h':
'absl/container:flat_hash_map',
'absl/container/flat_hash_set.h':
'absl/container:flat_hash_set',
'absl/container/inlined_vector.h':
'absl/container:inlined_vector',
'absl/functional/bind_front.h':
'absl/functional:bind_front',
'absl/functional/function_ref.h':
'absl/functional:function_ref',
'absl/hash/hash.h':
'absl/hash',
'absl/memory/memory.h':
'absl/memory',
'absl/meta/type_traits.h':
'absl/meta:type_traits',
'absl/random/random.h':
'absl/random',
'absl/status/status.h':
'absl/status',
'absl/status/statusor.h':
'absl/status:statusor',
'absl/strings/ascii.h':
'absl/strings',
'absl/strings/cord.h':
'absl/strings:cord',
'absl/strings/escaping.h':
'absl/strings',
'absl/strings/match.h':
'absl/strings',
'absl/strings/numbers.h':
'absl/strings',
'absl/strings/str_cat.h':
'absl/strings',
'absl/strings/str_format.h':
'absl/strings:str_format',
'absl/strings/str_join.h':
'absl/strings',
'absl/strings/str_replace.h':
'absl/strings',
'absl/strings/str_split.h':
'absl/strings',
'absl/strings/string_view.h':
'absl/strings',
'absl/strings/strip.h':
'absl/strings',
'absl/strings/substitute.h':
'absl/strings',
'absl/synchronization/mutex.h':
'absl/synchronization',
'absl/synchronization/notification.h':
'absl/synchronization',
'absl/time/clock.h':
'absl/time',
'absl/time/time.h':
'absl/time',
'absl/types/optional.h':
'absl/types:optional',
'absl/types/span.h':
'absl/types:span',
'absl/types/variant.h':
'absl/types:variant',
'absl/utility/utility.h':
'absl/utility',
'address_sorting/address_sorting.h':
'address_sorting',
'ares.h':
'cares',
'gmock/gmock.h':
'gtest',
'gtest/gtest.h':
'gtest',
'opencensus/trace/context_util.h':
'opencensus-trace-context_util',
'opencensus/trace/propagation/grpc_trace_bin.h':
'opencensus-trace-propagation',
'opencensus/tags/context_util.h':
'opencensus-tags-context_util',
'openssl/bio.h':
'libssl',
'openssl/bn.h':
'libcrypto',
'openssl/buffer.h':
'libcrypto',
'openssl/crypto.h':
'libcrypto',
'openssl/engine.h':
'libcrypto',
'openssl/err.h':
'libcrypto',
'openssl/evp.h':
'libcrypto',
'openssl/hmac.h':
'libcrypto',
'openssl/pem.h':
'libcrypto',
'openssl/rsa.h':
'libcrypto',
'openssl/sha.h':
'libcrypto',
'openssl/ssl.h':
'libssl',
'openssl/tls1.h':
'libssl',
'openssl/x509.h':
'libcrypto',
'openssl/x509v3.h':
'libcrypto',
're2/re2.h':
're2',
'upb/def.h':
'upb_lib',
'upb/json_encode.h':
'upb_json_lib',
'upb/text_encode.h':
'upb_textformat_lib',
'upb/def.hpp':
'upb_reflection',
'upb/upb.h':
'upb_lib',
'upb/upb.hpp':
'upb_lib',
'xxhash.h':
'xxhash',
'zlib.h':
'madler_zlib',
}
INTERNAL_DEPS = {
'google/rpc/status.upb.h':
'google_rpc_status_upb',
'google/protobuf/any.upb.h':
'protobuf_any_upb',
'google/protobuf/duration.upb.h':
'protobuf_duration_upb',
'google/protobuf/struct.upb.h':
'protobuf_struct_upb',
'google/protobuf/timestamp.upb.h':
'protobuf_timestamp_upb',
'google/protobuf/wrappers.upb.h':
'protobuf_wrappers_upb',
'grpc/status.h':
'grpc_public_hdrs',
'src/proto/grpc/channelz/channelz.grpc.pb.h':
'//src/proto/grpc/channelz:channelz_proto',
'src/proto/grpc/core/stats.pb.h':
'//src/proto/grpc/core:stats_proto',
'src/proto/grpc/health/v1/health.upb.h':
'grpc_health_upb',
'src/proto/grpc/lb/v1/load_reporter.grpc.pb.h':
'//src/proto/grpc/lb/v1:load_reporter_proto',
'src/proto/grpc/lb/v1/load_balancer.upb.h':
'grpc_lb_upb',
'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h':
'//src/proto/grpc/reflection/v1alpha:reflection_proto',
'src/proto/grpc/gcp/transport_security_common.upb.h':
'alts_upb',
'src/proto/grpc/gcp/altscontext.upb.h':
'alts_upb',
'src/proto/grpc/lookup/v1/rls.upb.h':
'rls_upb',
'src/proto/grpc/lookup/v1/rls_config.upb.h':
'rls_config_upb',
'src/proto/grpc/lookup/v1/rls_config.upbdefs.h':
'rls_config_upbdefs',
'src/proto/grpc/testing/xds/v3/csds.grpc.pb.h':
'//src/proto/grpc/testing/xds/v3:csds_proto',
'xds/data/orca/v3/orca_load_report.upb.h':
'xds_orca_upb',
'xds/service/orca/v3/orca.upb.h':
'xds_orca_service_upb',
'xds/type/v3/typed_struct.upb.h':
'xds_type_upb',
}
SKIP_DEPS = {'google/api/expr/v1alpha1/syntax.upb.h'}
class FakeSelects:
def config_setting_group(self, **kwargs):
pass
def grpc_cc_library(name,
hdrs=[],
public_hdrs=[],
srcs=[],
select_deps=None,
tags=[],
deps=[],
external_deps=[],
**kwargs):
if select_deps or 'nofixdeps' in tags or 'grpc-autodeps' not in tags:
no_update.add(name)
scores[name] = len(public_hdrs + hdrs)
# avoid_dep is the internal way of saying prefer something else
# we add grpc_avoid_dep to allow internal grpc-only stuff to avoid each
# other, whilst not biasing dependent projects
if 'avoid_dep' in tags or 'grpc_avoid_dep' in tags:
avoidness[name] += 10
if 'nofixdeps' in tags:
avoidness[name] += 1
for hdr in hdrs + public_hdrs:
vendors[hdr].append(name)
inc = set()
original_deps[name] = frozenset(deps)
original_external_deps[name] = frozenset(external_deps)
for src in hdrs + public_hdrs + srcs:
for line in open(src):
m = re.search(r'#include <(.*)>', line)
if m:
inc.add(m.group(1))
m = re.search(r'#include "(.*)"', line)
if m:
inc.add(m.group(1))
if 'grpc::g_glip' in line or 'grpc:g_core_codegen_interface' in line:
needs_codegen_base_src.add(name)
consumes[name] = list(inc)
def buildozer(cmd, target):
buildozer_commands.append('%s|%s' % (cmd, target))
def buildozer_set_list(name, values, target, via=""):
if not values:
buildozer('remove %s' % name, target)
return
adjust = via if via else name
buildozer('set %s %s' % (adjust, ' '.join('"%s"' % s for s in values)),
target)
if via:
buildozer('remove %s' % name, target)
buildozer('rename %s %s' % (via, name), target)
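# Illustrative example (hypothetical target and value) of the commands queued when a
# `via` attribute is used, e.g. buildozer_set_list('external_deps', ['absl/strings'], ':foo', via='deps'):
#   set deps "absl/strings"|:foo
#   remove external_deps|:foo
#   rename deps external_deps|:foo
# i.e. the list is written to `deps` first and then renamed to `external_deps`.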
def score_edit_distance(proposed, existing):
"""Score a proposed change primarily by edit distance"""
sum = 0
for p in proposed:
if p not in existing:
sum += 1
for e in existing:
if e not in proposed:
sum += 1
return sum
def total_score(proposal):
return sum(scores[dep] for dep in proposal)
def total_avoidness(proposal):
return sum(avoidness[dep] for dep in proposal)
def score_list_size(proposed, existing):
"""Score a proposed change primarily by number of dependencies"""
return len(proposed)
def score_best(proposed, existing):
"""Score a proposed change primarily by dependency score"""
return 0
SCORERS = {
'edit_distance': score_edit_distance,
'list_size': score_list_size,
'best': score_best,
}
parser = argparse.ArgumentParser(description='Fix build dependencies')
parser.add_argument('targets',
nargs='*',
default=[],
help='targets to fix (empty => all)')
parser.add_argument('--score',
type=str,
default='edit_distance',
help='scoring function to use: one of ' +
', '.join(SCORERS.keys()))
args = parser.parse_args()
exec(
open('BUILD', 'r').read(), {
'load': lambda filename, *args: None,
'licenses': lambda licenses: None,
'package': lambda **kwargs: None,
'exports_files': lambda files: None,
'config_setting': lambda **kwargs: None,
'selects': FakeSelects(),
'python_config_settings': lambda **kwargs: None,
'grpc_cc_library': grpc_cc_library,
'select': lambda d: d["//conditions:default"],
'grpc_upb_proto_library': lambda name, **kwargs: None,
'grpc_upb_proto_reflection_library': lambda name, **kwargs: None,
'grpc_generate_one_off_targets': lambda: None,
'filegroup': lambda name, **kwargs: None,
}, {})
# Keeps track of all possible sets of dependencies that could satify the
# problem. (models the list monad in Haskell!)
class Choices:
def __init__(self):
self.choices = set()
self.choices.add(frozenset())
def add_one_of(self, choices):
if not choices:
return
new_choices = set()
for append_choice in choices:
for choice in self.choices:
new_choices.add(choice.union([append_choice]))
self.choices = new_choices
def add(self, choice):
self.add_one_of([choice])
def remove(self, remove):
new_choices = set()
for choice in self.choices:
new_choices.add(choice.difference([remove]))
self.choices = new_choices
def best(self, scorer):
best = None
final_scorer = lambda x: (total_avoidness(x), scorer(x), total_score(x))
for choice in self.choices:
if best is None or final_scorer(choice) < final_scorer(best):
best = choice
return best
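# Illustrative walk-through of Choices (hypothetical provider targets):
#   c = Choices()
#   c.add_one_of(['grpc_trace', 'grpc_base'])  # a header vendored by two libraries
#   c.add('gpr_platform')                      # a header with a single provider
#   c.choices == {frozenset({'grpc_trace', 'gpr_platform'}),
#                 frozenset({'grpc_base', 'gpr_platform'})}
# best() then returns the candidate set that minimizes (avoidness, scorer, total score).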
error = False
for library in sorted(consumes.keys()):
if library in no_update:
continue
if args.targets and library not in args.targets:
continue
hdrs = sorted(consumes[library])
deps = Choices()
external_deps = Choices()
for hdr in hdrs:
if hdr == 'src/core/lib/profiling/stap_probes.h':
continue
if hdr in INTERNAL_DEPS:
deps.add(INTERNAL_DEPS[hdr])
continue
if hdr in vendors:
deps.add_one_of(vendors[hdr])
continue
if 'include/' + hdr in vendors:
deps.add_one_of(vendors['include/' + hdr])
continue
if '.' not in hdr:
# assume a c++ system include
continue
if hdr in EXTERNAL_DEPS:
external_deps.add(EXTERNAL_DEPS[hdr])
continue
if hdr.startswith('opencensus/'):
trail = hdr[len('opencensus/'):]
trail = trail[:trail.find('/')]
external_deps.add('opencensus-' + trail)
continue
if hdr.startswith('envoy/'):
path, file = os.path.split(hdr)
file = file.split('.')
path = path.split('/')
dep = '_'.join(path[:-1] + [file[1]])
deps.add(dep)
continue
if hdr.startswith('google/protobuf/') and not hdr.endswith('.upb.h'):
external_deps.add('protobuf_headers')
continue
if '/' not in hdr:
# assume a system include
continue
is_sys_include = False
for sys_path in [
'sys',
'arpa',
'netinet',
'linux',
'android',
'mach',
'net',
'CoreFoundation',
]:
if hdr.startswith(sys_path + '/'):
is_sys_include = True
break
if is_sys_include:
# assume a system include
continue
if hdr in SKIP_DEPS:
continue
print("# ERROR: can't categorize header: %s" % hdr)
error = True
if library in needs_codegen_base_src:
deps.add('grpc++_codegen_base_src')
deps.remove(library)
deps = sorted(
deps.best(lambda x: SCORERS[args.score](x, original_deps[library])))
external_deps = sorted(
external_deps.best(lambda x: SCORERS[args.score]
(x, original_external_deps[library])))
target = ':' + library
buildozer_set_list('external_deps', external_deps, target, via='deps')
buildozer_set_list('deps', deps, target)
if buildozer_commands:
ok_statuses = (0, 3)
temp = tempfile.NamedTemporaryFile()
open(temp.name, 'w').write('\n'.join(buildozer_commands))
c = ['tools/distrib/buildozer.sh', '-f', temp.name]
r = subprocess.call(c)
if r not in ok_statuses:
print('{} failed with status {}'.format(c, r))
sys.exit(1)
if error:
sys.exit(1)
|
py | 7df868c3d2695a50fb8d42fcdc7fa2c14f85b47a | #!/usr/bin/env python
'''
@Author: Balasubramanyam Evani
Manipal University Jaipur
'''
## Importing necessary libraries
import rospy
from dynamic_reconfigure.server import Server
from laser_onto_kinect.cfg import configConfig
import tf
import math
## class containig methods for implementing dynamic reconfigure and sending the transform
class dynamicTF(object):
def __init__(self):
self.parent_name = rospy.get_param('~parent' , 'camera_rgb_frame') ## parameter specifying the parent frame
self.child_name = rospy.get_param('~child' , 'laser') ## parameter specifying the child frame
self.linear = [0 ,0, 0] ## initializing translation
self.rot = [0 , 0, 0] ## initializing rotation
self.br = tf.TransformBroadcaster() ## Transform Broadcaster object
def run(self): ## Run at 10Hz
rospy.Rate(10)
config = Server(configConfig , self.callback) ## dynamic configure callback
self.send_transform() ## sending the tf
rospy.spin() ## stops from exiting
def callback(self , config , level):
self.linear = [ float("{x}".format(**config)) , float("{y}".format(**config)) , float("{z}".format(**config))]
self.rot = [float("{roll}".format(**config)), float("{pitch}".format(**config)), float("{yaw}".format(**config))]
return config
def send_transform(self):
rate = rospy.Rate(10) ## publish at 10 Hz instead of busy-looping at full CPU speed
while not rospy.is_shutdown():
self.br.sendTransform((self.linear[0] , self.linear[1] , self.linear[2]), tf.transformations.quaternion_from_euler(self.rot[0] * math.pi/180.0 , self.rot[1] * math.pi/180.0 ,self.rot[2]* math.pi/180.0) , rospy.Time.now() , self.child_name , self.parent_name) ## converting degrees to rads and sending the transform b/w parent and child
rate.sleep()
## Main Function Call
if __name__ == '__main__':
rospy.init_node("TF_broadcaster" , anonymous = True) ## initialization of node
TF = dynamicTF() ## Creating object of class dynamicTF
TF.run() ## running the process
|
py | 7df869a055d59871bb54a53a013e405b826e8698 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from adcm.settings import * # pylint: disable=wildcard-import,unused-wildcard-import
DEBUG = True
|
py | 7df869a15e76c28afb609fa4dbc059144ad70161 | print("Hello, World!")
|
py | 7df869a9e1b2e003f3dda19c95d6eed2ead330c2 | from django.utils.translation import ugettext_lazy as _
from mayan.apps.navigation.classes import Link
from mayan.apps.navigation.utils import factory_condition_queryset_access
from .icons import (
icon_document_file_content, icon_document_file_content_delete_single,
icon_document_file_content_delete_multiple,
icon_document_file_content_download, icon_document_file_page_content,
icon_document_file_parsing_errors_list, icon_document_file_submit,
icon_document_type_parsing_settings, icon_document_type_submit,
icon_error_list
)
from .permissions import (
permission_document_file_content_view, permission_document_type_parsing_setup,
permission_document_file_parse
)
# Document file
link_document_file_content = Link(
args='resolved_object.id', icon=icon_document_file_content,
permissions=(permission_document_file_content_view,), text=_('Content'),
view='document_parsing:document_file_content_view'
)
link_document_file_content_delete_single = Link(
args='resolved_object.id', icon=icon_document_file_content_delete_single,
permissions=(permission_document_file_parse,),
text=_('Delete parsed content'),
view='document_parsing:document_file_content_delete_single',
)
link_document_file_content_delete_multiple = Link(
icon=icon_document_file_content_delete_multiple,
text=_('Delete parsed content'),
view='document_parsing:document_file_content_delete_multiple',
)
link_document_file_content_download = Link(
args='resolved_object.id', icon=icon_document_file_content_download,
permissions=(permission_document_file_content_view,),
text=_('Download content'),
view='document_parsing:document_file_content_download'
)
link_document_file_page_content = Link(
args='resolved_object.id',
icon=icon_document_file_page_content,
permissions=(permission_document_file_content_view,), text=_('Content'),
view='document_parsing:document_file_page_content_view'
)
link_document_file_parsing_errors_list = Link(
args='resolved_object.id',
icon=icon_document_file_parsing_errors_list,
permissions=(permission_document_file_parse,), text=_('Parsing errors'),
view='document_parsing:document_file_parsing_error_list'
)
link_document_file_metadata_submit_multiple = Link(
icon=icon_document_file_submit,
text=_('Submit for parsing'),
view='document_parsing:document_file_multiple_submit'
)
link_document_file_metadata_submit_single = Link(
args='resolved_object.id',
icon=icon_document_file_submit,
permissions=(permission_document_file_parse,),
text=_('Submit for parsing'),
view='document_parsing:document_file_submit'
)
# Document type
link_document_type_parsing_settings = Link(
args='resolved_object.id',
icon=icon_document_type_parsing_settings,
permissions=(permission_document_type_parsing_setup,),
text=_('Setup parsing'),
view='document_parsing:document_type_parsing_settings'
)
link_document_type_submit = Link(
condition=factory_condition_queryset_access(
app_label='documents', model_name='DocumentType',
object_permission=permission_document_type_parsing_setup
),
icon=icon_document_type_submit,
text=_('Parse documents per type'),
view='document_parsing:document_type_submit'
)
# Errors
link_error_list = Link(
icon=icon_error_list,
permissions=(permission_document_file_parse,), text=_('Parsing errors'),
view='document_parsing:error_list'
)
|
py | 7df869c282ee9240794859aa64afb584ded88563 | #!/usr/bin/env python3
"""
itio: Interactive Twinleaf I/O
License: MIT
Author: Thomas Kornack <[email protected]>
"""
import tldevice
import argparse
parser = argparse.ArgumentParser(prog='itio',
description='Interactive Twinleaf I/O.')
parser.add_argument("url",
nargs='?',
default='tcp://localhost/',
help='URL: tcp://localhost')
parser.add_argument("--rpc",
action='append',
default=[],
type=lambda kv: kv.split(":"),
help='Commands to be sent on start; rpc:type:val')
parser.add_argument('-v',
action="store_true",
default=False,
help='Verbose output for debugging')
parser.add_argument('-r',
action="store_false",
default=True,
help='Ignore and rebuild rpc/stream cache')
parser.add_argument('-t',
action="store_true",
default=False,
help='Quit after 1 second network socket timeout')
args = parser.parse_args()
device = tldevice.Device(url=args.url, verbose=args.v, rpcs=args.rpc, stateCache=args.r, timeout=args.t)
device._interact()
|
py | 7df86a75c14f981589322bbddaf6dcdd609be417 | # Generated by Django 3.1.2 on 2020-10-27 22:23
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | 7df86af4e43cdd103dffb6337fa249a86927903a | import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import box, humanize_list
from .events import main
class gsettings(main):
def __init__(self, bot):
super().__init__(bot)
@commands.group(
name="giveawaysettings",
aliases=["gset", "giveawaysetting"],
invoke_without_command=True,
)
@commands.admin_or_permissions(administrator=True)
async def gset(self, ctx):
"""
Customize giveaways to how you want them.
        Each subcommand represents a separate setting."""
await ctx.send_help("gset")
@gset.command(name="giveaway_message", aliases=["gmsg"], usage="<message>")
@commands.admin_or_permissions(administrator=True)
async def gmsg(self, ctx, *, message):
"""
Set a custom giveaway message.
This message shows above the giveaway embed."""
await self.config.set_guild_msg(ctx.guild, message)
await ctx.reply(f"The new giveaway message has been set to \n```\n{message}\n```")
@gset.command(name="thank_message", aliases=["tmsg"])
@commands.admin_or_permissions(administrator=True)
async def tmsg(self, ctx, *, message):
"""
        Set a custom thank message for giveaways.
This message gets sent in an embed when you use the `--thank` flag while starting a giveaway.
Usable variables:
- donor :
donor.mention
donor.display_name
donor.name
- prize
Use these variables within curly brackets.
For Example:
`[p]gset tmsg Donated by: {donor.mention}
Prize: **{prize}**
Please thank **{donor.name}** in #general`"""
await self.config.set_guild_tmsg(ctx.guild, message)
await ctx.reply(f"The new giveaway message has been set to \n```\n{message}\n```")
@gset.command(name="emoji", usage="<emoji>")
@commands.admin_or_permissions(administrator=True)
async def emoji(self, ctx, emoji: discord.Emoji):
"""
Set a custom giveaway emoji that the bot reacts with on giveaway embeds.
        The bot must have access to the emoji for it to be used."""
await self.config.set_guild_emoji(ctx.guild, emoji)
await ctx.reply(f"The new giveaway emoji has been set to {emoji}")
@gset.command(name="winnerdm", usage="<status>")
@commands.admin_or_permissions(administrator=True)
async def winnerdm(self, ctx, status: bool):
"""
Set whether the bot dms the winners when the giveaway ends.
        The bot won't be able to dm winners who have their dms closed."""
await self.config.set_guild_windm(ctx.guild, status)
await ctx.reply(
"The winner will be dm'ed when the giveaway ends now."
            if status
else "The winner will not be dm'ed when the giveaway ends."
)
@gset.command(name="hostdm", usage="<status>")
@commands.admin_or_permissions(administrator=True)
async def hostdm(self, ctx, status: bool):
"""
Set whether the bot dms the host when the giveaway ends.
        The bot won't be able to dm the host if they have their dms closed."""
await self.config.set_guild_hostdm(ctx.guild, status)
await ctx.reply(
"The host will be dm'ed when the giveaway ends now."
            if status
else "The host will not be dm'ed when the giveaway ends."
)
@gset.command(name="end_message", aliases=["endmsg"], usage="<message>")
@commands.admin_or_permissions(administrator=True)
async def endmsg(self, ctx, *, message):
"""
Set the message that gets sent when a giveaway ends.
Usable variables:
- prize : The prize of the giveaway
- winner : The winner(s) of the giveaway
- link : The jumplink to the giveaway.
For example:
        `[p]gset endmsg Congratulations {winner}! You have won the giveaway for **{prize}**.
{link}`"""
await self.config.set_guild_endmsg(ctx.guild, message)
await ctx.reply(f"The ending message has been changed to\n```\n{message}\n```")
@gset.command(name="manager", usage="<role>")
@commands.admin_or_permissions(administrator=True)
async def manager(self, ctx, *roles: discord.Role):
"""
Set roles that can manage giveaways in your server.
        If you don't set this up, users will need either the manage messages permission or the server's bot mod role."""
await self.config.set_manager(ctx.guild, [role.id for role in roles])
await ctx.reply(
f"{humanize_list(role.mention for role in roles)} have been set as the giveaway managers!",
allowed_mentions=discord.AllowedMentions(roles=False, replied_user=False),
)
@gset.command(name="pingrole", usage="<role>")
@commands.admin_or_permissions(administrator=True)
async def pingrole(self, ctx, role: discord.Role):
"""
Set which role gets pinged in giveaways.
        This only takes effect when the `--ping` flag is used in giveaways."""
await self.config.set_guild_pingrole(ctx.guild, role.id)
await ctx.reply(
f"{role.mention} has been set as the pingrole!",
allowed_mentions=discord.AllowedMentions(roles=False, replied_user=False),
)
@gset.command(name="autodelete", aliases=["autodel"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
async def auto(self, ctx, toggle: bool):
"""
Set whether giveaway command invocations get automatically deleted or not.
Pass true to delete and false to not."""
await self.config.set_guild_autodelete(ctx.guild, toggle)
await ctx.reply(
"Giveaway commands will automatically delete now."
            if toggle
            else "Giveaway commands will not be deleted."
)
@gset.command(name="blacklist", aliases=["bl"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def bl_role(self, ctx, roles: commands.Greedy[discord.Role] = None):
"""
        Blacklist roles from giveaways permanently without having to pass them as requirements each time.
You can send multiple role ids or mentions.
Sending nothing will show a list of blacklisted roles."""
if not roles:
roles = await self.config.all_blacklisted_roles(ctx.guild, False)
return await ctx.send(
embed=discord.Embed(
title=f"Giveaway Blacklisted Roles in `{ctx.guild.name}`!",
description="\n\n".join([str(role.mention) for role in roles])
if roles
else "No roles have been blacklisted from giveaways permanently.",
color=discord.Color.green(),
)
)
roles = await self.config.blacklist_role(ctx.guild, roles)
await ctx.send(roles)
@gset.command(name="unblacklist", aliases=["ubl"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def ubl_role(self, ctx, roles: commands.Greedy[discord.Role]):
"""
Unblacklist previously blacklisted roles from giveaways."""
roles = await self.config.unblacklist_role(ctx.guild, roles)
return await ctx.send(roles)
@gset.command(name="bypass", aliases=["by"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def by_role(
self,
ctx,
add_or_remove=None,
roles: commands.Greedy[discord.Role] = None,
):
"""
Set roles to bypass all giveaways in your server.
Passing no parameters will show a list of all roles set to bypass.
1st argument should be either of add or remove
2nd should be role ids or mentions separated by spaces."""
if not add_or_remove and not roles:
roles = await self.config.all_bypass_roles(ctx.guild, False)
return await ctx.send(
embed=discord.Embed(
title=f"Role Bypasses for `{ctx.guild.name}`!",
description="\n\n".join([str(role.mention) for role in roles])
if roles
else "No role bypasses set in this server.",
color=discord.Color.green(),
)
)
        if add_or_remove.lower() not in ["add", "remove"]:
return await ctx.send_help("gset bypass")
if add_or_remove.lower() == "add":
roles = await self.config.bypass_role(ctx.guild, roles)
return await ctx.send(roles)
roles = await self.config.unbypass_role(ctx.guild, roles)
return await ctx.send(roles)
@gset.command(name="multi", aliases=["rolemulti", "rm"])
@commands.guild_only()
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def role_multi(
self,
ctx,
add_or_remove=None,
role: discord.Role = None,
multi: int = None,
):
"""
Add role multipliers for giveaways.
This multiplier gives extra entries to users with that role in giveaways.
        If a user has multiple roles, each with its own multiplier, all of them apply.
        A role's multiplier cannot be greater than 5.
        Passing no parameters will show you the current multipliers of the server.
[add_or_remove] takes either of 'add' or 'remove'.
[role] is the role name, id or mention and
[multi] is the multiplier amount. This is not required when you are removing."""
if not add_or_remove and not role and not multi:
roles = await self.config.get_all_roles_multi(ctx.guild)
return await ctx.send(
embed=discord.Embed(
title=f"Role Multipliers for `{ctx.guild.name}`'s giveaways!",
description=box(
"\n\n".join(
[
f"@{k.name:<10} {'<'+'-'*15+'>':>5} {v:>5}"
for k, v in roles.items()
]
)
if roles
else "No role multipliers set in this server."
),
color=discord.Color.green(),
)
)
        if add_or_remove.lower() not in ["add", "remove"]:
return await ctx.send_help("gset multi")
if add_or_remove.lower() == "add":
if multi > 5:
return await ctx.send("Multipliers must be under 5x.")
role = await self.config.set_role_multi(role, multi)
return await ctx.send(role)
else:
role = await self.config.reset_role_multi(role)
return await ctx.send(role)
@gset.command(name="showsettings", aliases=["ss", "show", "showset"])
@commands.admin_or_permissions(administrator=True)
@commands.bot_has_permissions(embed_links=True)
async def show(self, ctx):
"""
See giveaway settings configured for your server"""
message = await self.config.get_guild_msg(ctx.guild)
emoji = await self.config.get_guild_emoji(ctx.guild)
winnerdm = await self.config.dm_winner(ctx.guild)
hostdm = await self.config.dm_host(ctx.guild)
endmsg = await self.config.get_guild_endmsg(ctx.guild)
managers = await self.config.get_managers(ctx.guild)
autodelete = await self.config.config.guild(ctx.guild).autodelete()
embed = discord.Embed(
title=f"Giveaway Settings for **__{ctx.guild.name}__**",
description=f"""
**Giveaway Managers:** {humanize_list([manager.mention for manager in managers if manager]) if managers else "No managers set. Requires manage message permission or bot's mod role."}
**Message:** {message}
**Reaction Emoji:** {emoji}
**Will the winner be dm'ed?:** {winnerdm}
**Will the host be dm'ed?:** {hostdm}
**Auto delete Giveaway Commands?:** {autodelete}
**Giveaway Ending message:** \n```\n{endmsg}\n```\n
""",
color=discord.Color.green(),
)
embed.set_footer(text=ctx.guild.name, icon_url=ctx.guild.icon.url)
embed.set_thumbnail(url=ctx.author.avatar.url)
await ctx.send(embed=embed)
|
py | 7df86b284691fc15aadbc603c06946f579413db9 | # -*- coding: utf-8 -*-
"""Test epoch_ms and utc datetime conversions."""
from dsdk.utils import (
epoch_ms_from_utc_datetime,
now_utc_datetime,
utc_datetime_from_epoch_ms,
)
def test_conversions():
"""Test conversions."""
expected = now_utc_datetime()
epoch_ms = epoch_ms_from_utc_datetime(expected)
actual = utc_datetime_from_epoch_ms(epoch_ms)
assert expected == actual
|
py | 7df86bad312c920fce947b96eb8ad3d58ca3aeca | """
Created on February 16 2021
@author: Andreas Spanopoulos
Script used to train an Alpha Zero agent.
Example usage:
python3 train.py
--train-config ../configurations/training_hyperparams.ini
--nn-config ../configurations/neural_network_architecture.ini
--nn-checkpoints ../models/checkpoints
--mcts-config ../configurations/mcts_hyperparams.ini
--device cpu
"""
import logging
import torch
from src.utils.main_utils import parse_train_input
from src.utils.config_parsing_utils import parse_config_file
from src.environment.variants.racing_kings import RacingKingsEnv
from src.environment.actions.racing_kings_actions import RacingKingsActions
from src.neural_network.network import NeuralNetwork
from src.neural_network.generic_network import GenericNeuralNetwork
from src.agent.chess_agent import AlphaZeroChessAgent
def main(args):
""" main() driver function """
# set logging format
fmt = "(%(filename)s:%(lineno)d) [%(levelname)s]: %(message)s"
logging.basicConfig(level=logging.INFO, format=fmt)
# create the environment and an API used to translate actions into their corresponding IDs
env = RacingKingsEnv()
mvt = RacingKingsActions()
# parse the specific configuration files in order to start building the class objects
model_configuration = parse_config_file(args.nn_config, _type='nn_architecture')
mcts_configuration = parse_config_file(args.mcts_config, _type='mcts_hyperparams')
train_configuration = parse_config_file(args.train_config, _type='training')
# add the checkpoints dictionary path to the training configuration dictionary
train_configuration['checkpoints_directory'] = args.nn_checkpoints
# determine the device on which to build and train the NN
device = torch.device(args.device)
# add additional information to the NN configuration and initialize it
model_configuration['input_shape'] = torch.Tensor(env.current_state_representation).shape
model_configuration['num_actions'] = mvt.num_actions
if args.generic:
model = GenericNeuralNetwork(model_configuration, device).to(device)
else:
model = NeuralNetwork(model_configuration, device).to(device)
# finally create the Chess agent
chess_agent = AlphaZeroChessAgent(env=env,
mvt=mvt,
nn=model,
device=device,
mcts_config=mcts_configuration,
train_config=train_configuration,
pretrained_w=args.pre_trained_weights)
# train the Chess agent using self play
chess_agent.train_agent()
if __name__ == "__main__":
print()
arg = parse_train_input()
main(arg)
|
py | 7df86c212be8dda944ed4b4ed0e23299f498d4a6 | import os
cwd = os.getcwd()
service_text = """
[Unit]
Description=PyFilter
After=network.target
[Service]
WorkingDirectory={}
ExecStart={}/run.sh
[Install]
WantedBy=multi-user.target
""".format(cwd, cwd)
with open("PyFilter.service", "w") as f:
f.write(service_text)
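# Usage note (assumption, not part of this script): a unit file generated this
# way is typically installed and enabled with systemd's standard commands, e.g.
#
#   sudo cp PyFilter.service /etc/systemd/system/
#   sudo systemctl daemon-reload
#   sudo systemctl enable --now PyFilter.service
#
# Adjust paths and the service name to match the host setup.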
|
py | 7df86cb198f37a44af6a3378f8cb50a7ab61ca62 | from abc import ABC, abstractmethod
class Annotator(ABC):
def __init__(self):
super().__init__()
@abstractmethod
def annotate(self, string_of_text):
pass
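# Minimal usage sketch (illustrative only): concrete annotators subclass
# Annotator and override annotate(). "UppercaseAnnotator" below is a
# hypothetical example, not part of this package.
if __name__ == "__main__":
    class UppercaseAnnotator(Annotator):
        def annotate(self, string_of_text):
            # Trivial "annotation": return the upper-cased text.
            return string_of_text.upper()
    print(UppercaseAnnotator().annotate("hello"))  # -> HELLO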
|
py | 7df86d086cba81a2850eeacbd9d371441842a596 | import numpy as np
from faker import Faker
import random
from tqdm import tqdm
from babel.dates import format_date
from keras.utils import to_categorical
import keras.backend as K
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
matplotlib.pyplot.ion()
fake = Faker()
fake.seed(12345)
random.seed(12345)
# Define format of the data we would like to generate
FORMATS = ['short',
'medium',
'long',
'full',
'full',
'full',
'full',
'full',
'full',
'full',
'full',
'full',
'full',
'd MMM YYY',
'd MMMM YYY',
'dd MMM YYY',
'd MMM, YYY',
'd MMMM, YYY',
'dd, MMM YYY',
'd MM YY',
'd MMMM YYY',
'MMMM d YYY',
'MMMM d, YYY',
'dd.MM.YY']
# change this if you want it to work with another language
LOCALES = ['en_US']
def load_date():
"""
Loads some fake dates
:returns: tuple containing human readable string, machine readable string, and date object
"""
dt = fake.date_object()
try:
human_readable = format_date(dt, format=random.choice(FORMATS), locale=random.choice(LOCALES))
human_readable = human_readable.lower()
human_readable = human_readable.replace(',','')
machine_readable = dt.isoformat()
except AttributeError as e:
return None, None, None
return human_readable, machine_readable, dt
def load_dataset(m):
"""
Loads a dataset with m examples and vocabularies
:m: the number of examples to generate
"""
human_vocab = set()
machine_vocab = set()
dataset = []
Tx = 30
for i in tqdm(range(m)):
h, m, _ = load_date()
if h is not None:
dataset.append((h, m))
human_vocab.update(tuple(h))
machine_vocab.update(tuple(m))
human = dict(zip(
sorted(human_vocab) + ['<unk>', '<pad>'],
list(range(len(human_vocab) + 2))
))
inv_machine = dict(enumerate(sorted(machine_vocab)))
machine = { v:k for k,v in inv_machine.items() }
return dataset, human, machine, inv_machine
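# Example usage (illustrative sketch; the exact contents depend on the Faker seed):
#
#   dataset, human_vocab, machine_vocab, inv_machine_vocab = load_dataset(10000)
#   dataset[:2]          # e.g. [('9 may 1998', '1998-05-09'), ('10.09.70', '1970-09-10')]
#   len(human_vocab)     # characters seen in human-readable dates, plus '<unk>' and '<pad>'
#   len(machine_vocab)   # characters used by ISO dates: the digits 0-9 and '-'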
def preprocess_data(dataset, human_vocab, machine_vocab, Tx, Ty):
X, Y = zip(*dataset)
X = np.array([string_to_int(i, Tx, human_vocab) for i in X])
# X = [string_to_int(i, Tx, human_vocab) for i in X]
Y = np.array([string_to_int(t, Ty, machine_vocab) for t in Y])
# Y = [string_to_int(t, Ty, machine_vocab) for t in Y]
Xoh = np.array(list(map(lambda x: to_categorical(x, num_classes=len(human_vocab)), X)))
Yoh = np.array(list(map(lambda x: to_categorical(x, num_classes=len(machine_vocab)), Y)))
return X, Y, Xoh, Yoh
def string_to_int(string, length, vocab):
"""
    Converts the input string into a list of integers representing the positions of the
    string's characters in the "vocab"
Arguments:
string -- input string, e.g. 'Wed 10 Jul 2007'
length -- the number of time steps you'd like, determines if the output will be padded or cut
vocab -- vocabulary, dictionary used to index every character of your "string"
Returns:
rep -- list of integers (or '<unk>') (size = length) representing the position of the string's character in the vocabulary
"""
#make lower to standardize
string = string.lower()
string = string.replace(',','')
if len(string) > length:
string = string[:length]
rep = list(map(lambda x: vocab.get(x, '<unk>'), string))
if len(string) < length:
rep += [vocab['<pad>']] * (length - len(string))
#print (rep)
return rep
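# Worked example (illustrative; the vocabulary below is a made-up fragment):
#
#   vocab = {' ': 0, '1': 1, '8': 2, 'a': 3, 'm': 4, 'y': 5, '<unk>': 6, '<pad>': 7}
#   string_to_int("1 may 18", 10, vocab)
#   # -> [1, 0, 4, 3, 5, 0, 1, 2, 7, 7]   (padded with '<pad>' up to length 10)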
def int_to_string(ints, inv_vocab):
"""
Output a machine readable list of characters based on a list of indexes in the machine's vocabulary
Arguments:
ints -- list of integers representing indexes in the machine's vocabulary
inv_vocab -- dictionary mapping machine readable indexes to machine readable characters
Returns:
l -- list of characters corresponding to the indexes of ints thanks to the inv_vocab mapping
"""
l = [inv_vocab[i] for i in ints]
return l
TIME_STEPS = 30  # assumed encoding length for run_example below; matches Tx = 30 used elsewhere in this module
EXAMPLES = ['3 May 1979', '5 Apr 09', '20th February 2016', 'Wed 10 Jul 2007']
def run_example(model, input_vocabulary, inv_output_vocabulary, text):
encoded = string_to_int(text, TIME_STEPS, input_vocabulary)
prediction = model.predict(np.array([encoded]))
prediction = np.argmax(prediction[0], axis=-1)
return int_to_string(prediction, inv_output_vocabulary)
def run_examples(model, input_vocabulary, inv_output_vocabulary, examples=EXAMPLES):
predicted = []
for example in examples:
predicted.append(''.join(run_example(model, input_vocabulary, inv_output_vocabulary, example)))
print('input:', example)
print('output:', predicted[-1])
return predicted
def softmax(x, axis=1):
"""Softmax activation function.
# Arguments
x : Tensor.
axis: Integer, axis along which the softmax normalization is applied.
# Returns
Tensor, output of softmax transformation.
# Raises
ValueError: In case `dim(x) == 1`.
"""
ndim = K.ndim(x)
if ndim == 2:
return K.softmax(x)
elif ndim > 2:
e = K.exp(x - K.max(x, axis=axis, keepdims=True))
s = K.sum(e, axis=axis, keepdims=True)
return e / s
else:
raise ValueError('Cannot apply softmax to a tensor that is 1D')
def plot_attention_map(model, input_vocabulary, inv_output_vocabulary, text, n_s = 128, num = 6, Tx = 30, Ty = 10):
"""
Plot the attention map.
"""
attention_map = np.zeros((10, 30))
Ty, Tx = attention_map.shape
s0 = np.zeros((1, n_s))
c0 = np.zeros((1, n_s))
layer = model.layers[num]
encoded = np.array(string_to_int(text, Tx, input_vocabulary)).reshape((1, 30))
encoded = np.array(list(map(lambda x: to_categorical(x, num_classes=len(input_vocabulary)), encoded)))
f = K.function(model.inputs, [layer.get_output_at(t) for t in range(Ty)])
r = f([encoded, s0, c0])
for t in range(Ty):
for t_prime in range(Tx):
attention_map[t][t_prime] = r[t][0,t_prime,0]
# Normalize attention map
# row_max = attention_map.max(axis=1)
# attention_map = attention_map / row_max[:, None]
prediction = model.predict([encoded, s0, c0])
predicted_text = []
for i in range(len(prediction)):
predicted_text.append(int(np.argmax(prediction[i], axis=1)))
predicted_text = list(predicted_text)
predicted_text = int_to_string(predicted_text, inv_output_vocabulary)
text_ = list(text)
# get the lengths of the string
input_length = len(text)
output_length = Ty
# Plot the attention_map
plt.clf()
f = plt.figure(figsize=(8, 8.5))
ax = f.add_subplot(1, 1, 1)
# add image
i = ax.imshow(attention_map, interpolation='nearest', cmap='Blues')
# add colorbar
cbaxes = f.add_axes([0.2, 0, 0.6, 0.03])
cbar = f.colorbar(i, cax=cbaxes, orientation='horizontal')
cbar.ax.set_xlabel('Alpha value (Probability output of the "softmax")', labelpad=2)
# add labels
ax.set_yticks(range(output_length))
ax.set_yticklabels(predicted_text[:output_length])
ax.set_xticks(range(input_length))
ax.set_xticklabels(text_[:input_length], rotation=45)
ax.set_xlabel('Input Sequence')
ax.set_ylabel('Output Sequence')
# add grid and legend
ax.grid()
#f.show()
return attention_map
|
py | 7df86d0cbd44d4fff1e5665839d4bb915a1a32ca | #!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
from PyQt5 import QtGui
from PyQt5 import QtWidgets
#from PyQt5 import QtCore
from abc import abstractmethod
class ActivityBase(QtWidgets.QWidget):
"""
Abstract base class for activities.
"""
def __init__(self, name, parentwidget):
""" """
# Initialize parent.
super(ActivityBase, self).__init__(parentwidget)
self._parent = parentwidget
self._mainmenubutton = None
#
self._write_to_status_bar('Loading ' + name + '...')
#
self.setObjectName(name)
# Add specific content. Abstract, implemented by subclasses.
self._create_content()
#
self._write_to_status_bar('')
def set_main_menu_button(self, button):
""" """
self._mainmenubutton = button
def get_main_menu_button(self):
""" """
return self._mainmenubutton
def show_in_main_window(self):
""" """
self._parent.showActivity(self)
@abstractmethod
def _create_content(self):
"""
Used to create the content of the activity window.
Note: Abstract. Should be implemented by subclasses.
"""
pass
def _create_scrollable_content(self):
"""
Creates the scrollable part of the activity content.
Used by subclasses, if needed.
"""
content = QtWidgets.QWidget()
# Add scroll.
mainscroll = QtWidgets.QScrollArea()
### mainscroll.setFrameShape(QtWidgets.QFrame.NoFrame)
mainscroll.setWidget(content)
mainscroll.setWidgetResizable(True)
mainlayout = QtWidgets.QVBoxLayout()
mainlayout.setContentsMargins(0, 0, 0, 0)
mainlayout.setSpacing(0)
mainlayout.addWidget(mainscroll)
self.setLayout(mainlayout)
return content
def _write_to_status_bar(self, message):
""" Used to write short messages to the main window status bar. """
self._parent.statusBar().showMessage(message)
def _write_to_log(self, message):
"""
Used to write log messages. Depending on the main window
settings they will appear on different locations, for example
in log file and/or in the Log tool window.
"""
self._parent.write_to_log(message)
|
py | 7df86d3e886ba90d75403585f7713849757fcf36 | from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
"""
        Time complexity: O(N) since we visit each of the N nodes exactly once.
        Space complexity: O(N) for a skewed binary tree; O(log N) for a balanced binary tree.
        The space is used by the recursion call stack.
Runtime: 32 ms, faster than 92.54% of Python3 online submissions for Binary Tree Inorder Traversal.
"""
def values(node: TreeNode, nums: List[int]) -> List[int]:
if not node:
return nums
values(node.left, nums)
nums.append(node.val)
values(node.right, nums)
return nums
return values(root, [])
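# Quick self-check (illustrative; not part of the original solution):
if __name__ == "__main__":
    # Build the tree  1 -> right 2 -> left 3  used in the classic example.
    root = TreeNode(1)
    root.right = TreeNode(2)
    root.right.left = TreeNode(3)
    print(Solution().inorderTraversal(root))  # expected: [1, 3, 2]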
|
py | 7df86d4df1249adaaaf690aff50a56b57f459880 | #----------------------------------------------------------------------
# Name: wxPython.lib.editor.Editor
# Purpose: An intelligent text editor with colorization capabilities.
#
# Original
# Authors: Dirk Holtwic, Robin Dunn
#
# New
# Authors: Adam Feuer, Steve Howell
#
# History:
# This code used to support a fairly complex subclass that did
# syntax coloring and outliner collapse mode. Adam and Steve
# inherited the code, and added a lot of basic editor
# functionality that had not been there before, such as cut-and-paste.
#
#
# Created: 15-Dec-1999
# RCS-ID: $Id: editor.py 51004 2008-01-03 08:17:39Z RD $
# Copyright: (c) 1999 by Dirk Holtwick, 1999
# Licence: wxWindows license
#----------------------------------------------------------------------
# 12/14/2003 - Jeff Grimmett ([email protected])
#
# o 2.5 compatability update.
#
# 12/21/2003 - Jeff Grimmett ([email protected])
#
# o wxEditor -> Editor
#
import os
import time
import wx
import selection
import images
#----------------------------
def ForceBetween(min, val, max):
if val > max:
return max
if val < min:
return min
return val
def LineTrimmer(lineOfText):
if len(lineOfText) == 0:
return ""
elif lineOfText[-1] == '\r':
return lineOfText[:-1]
else:
return lineOfText
def LineSplitter(text):
return map (LineTrimmer, text.split('\n'))
#----------------------------
class Scroller:
def __init__(self, parent):
self.parent = parent
self.ow = 0
self.oh = 0
self.ox = 0
self.oy = 0
def SetScrollbars(self, fw, fh, w, h, x, y):
if (self.ow != w or self.oh != h or self.ox != x or self.oy != y):
self.parent.SetScrollbars(fw, fh, w, h, x, y)
self.ow = w
self.oh = h
self.ox = x
self.oy = y
#----------------------------------------------------------------------
class Editor(wx.ScrolledWindow):
def __init__(self, parent, id,
pos=wx.DefaultPosition, size=wx.DefaultSize, style=0):
wx.ScrolledWindow.__init__(self, parent, id,
pos, size,
style|wx.WANTS_CHARS)
self.isDrawing = False
self.InitCoords()
self.InitFonts()
self.SetColors()
self.MapEvents()
self.LoadImages()
self.InitDoubleBuffering()
self.InitScrolling()
self.SelectOff()
self.SetFocus()
self.SetText([""])
self.SpacesPerTab = 4
##------------------ Init stuff
def InitCoords(self):
self.cx = 0
self.cy = 0
self.oldCx = 0
self.oldCy = 0
self.sx = 0
self.sy = 0
self.sw = 0
self.sh = 0
self.sco_x = 0
self.sco_y = 0
def MapEvents(self):
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_MOTION, self.OnMotion)
self.Bind(wx.EVT_SCROLLWIN, self.OnScroll)
self.Bind(wx.EVT_CHAR, self.OnChar)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_WINDOW_DESTROY, self.OnDestroy)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground)
##------------------- Platform-specific stuff
def NiceFontForPlatform(self):
if wx.Platform == "__WXMSW__":
font = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL)
else:
font = wx.Font(12, wx.MODERN, wx.NORMAL, wx.NORMAL, False)
if wx.Platform == "__WXMAC__":
font.SetNoAntiAliasing()
return font
def UnixKeyHack(self, key):
#
# this will be obsolete when we get the new wxWindows patch
#
# 12/14/03 - jmg
#
# Which patch? I don't know if this is needed, but I don't know
# why it's here either. Play it safe; leave it in.
#
if key <= 26:
key += ord('a') - 1
return key
##-------------------- UpdateView/Cursor code
def OnSize(self, event):
self.AdjustScrollbars()
self.SetFocus()
def SetCharDimensions(self):
# TODO: We need a code review on this. It appears that Linux
# improperly reports window dimensions when the scrollbar's there.
self.bw, self.bh = self.GetClientSize()
if wx.Platform == "__WXMSW__":
self.sh = self.bh / self.fh
self.sw = (self.bw / self.fw) - 1
else:
self.sh = self.bh / self.fh
if self.LinesInFile() >= self.sh:
self.bw = self.bw - wx.SystemSettings_GetMetric(wx.SYS_VSCROLL_X)
self.sw = (self.bw / self.fw) - 1
self.sw = (self.bw / self.fw) - 1
if self.CalcMaxLineLen() >= self.sw:
self.bh = self.bh - wx.SystemSettings_GetMetric(wx.SYS_HSCROLL_Y)
self.sh = self.bh / self.fh
def UpdateView(self, dc = None):
if dc is None:
dc = wx.ClientDC(self)
if dc.Ok():
self.SetCharDimensions()
self.KeepCursorOnScreen()
self.DrawSimpleCursor(0,0, dc, True)
self.Draw(dc)
def OnPaint(self, event):
dc = wx.PaintDC(self)
if self.isDrawing:
return
self.isDrawing = True
self.UpdateView(dc)
wx.CallAfter(self.AdjustScrollbars)
self.isDrawing = False
def OnEraseBackground(self, evt):
pass
##-------------------- Drawing code
def InitFonts(self):
dc = wx.ClientDC(self)
self.font = self.NiceFontForPlatform()
dc.SetFont(self.font)
self.fw = dc.GetCharWidth()
self.fh = dc.GetCharHeight()
def SetColors(self):
self.fgColor = wx.NamedColour('black')
self.bgColor = wx.NamedColour('white')
self.selectColor = wx.Colour(238, 220, 120) # r, g, b = emacsOrange
def InitDoubleBuffering(self):
pass
def DrawEditText(self, t, x, y, dc):
dc.DrawText(t, x * self.fw, y * self.fh)
def DrawLine(self, line, dc):
if self.IsLine(line):
l = line
t = self.lines[l]
dc.SetTextForeground(self.fgColor)
fragments = selection.Selection(
self.SelectBegin, self.SelectEnd,
self.sx, self.sw, line, t)
x = 0
for (data, selected) in fragments:
if selected:
dc.SetTextBackground(self.selectColor)
if x == 0 and len(data) == 0 and len(fragments) == 1:
data = ' '
else:
dc.SetTextBackground(self.bgColor)
self.DrawEditText(data, x, line - self.sy, dc)
x += len(data)
def Draw(self, odc=None):
if not odc:
odc = wx.ClientDC(self)
dc = wx.BufferedDC(odc)
if dc.IsOk():
dc.SetFont(self.font)
dc.SetBackgroundMode(wx.SOLID)
dc.SetTextBackground(self.bgColor)
dc.SetTextForeground(self.fgColor)
dc.Clear()
for line in range(self.sy, self.sy + self.sh):
self.DrawLine(line, dc)
if len(self.lines) < self.sh + self.sy:
self.DrawEofMarker(dc)
self.DrawCursor(dc)
##------------------ eofMarker stuff
def LoadImages(self):
self.eofMarker = images.EofImage.GetBitmap()
def DrawEofMarker(self,dc):
x = 0
y = (len(self.lines) - self.sy) * self.fh
hasTransparency = 1
dc.DrawBitmap(self.eofMarker, x, y, hasTransparency)
##------------------ cursor-related functions
def DrawCursor(self, dc = None):
if not dc:
dc = wx.ClientDC(self)
if (self.LinesInFile())<self.cy: #-1 ?
self.cy = self.LinesInFile()-1
s = self.lines[self.cy]
x = self.cx - self.sx
y = self.cy - self.sy
self.DrawSimpleCursor(x, y, dc)
def DrawSimpleCursor(self, xp, yp, dc = None, old=False):
if not dc:
dc = wx.ClientDC(self)
if old:
xp = self.sco_x
yp = self.sco_y
szx = self.fw
szy = self.fh
x = xp * szx
y = yp * szy
dc.Blit(x,y, szx,szy, dc, x,y, wx.SRC_INVERT)
self.sco_x = xp
self.sco_y = yp
##-------- Enforcing screen boundaries, cursor movement
def CalcMaxLineLen(self):
"""get length of longest line on screen"""
maxlen = 0
for line in self.lines[self.sy:self.sy+self.sh]:
if len(line) >maxlen:
maxlen = len(line)
return maxlen
def KeepCursorOnScreen(self):
self.sy = ForceBetween(max(0, self.cy-self.sh), self.sy, self.cy)
self.sx = ForceBetween(max(0, self.cx-self.sw), self.sx, self.cx)
self.AdjustScrollbars()
def HorizBoundaries(self):
self.SetCharDimensions()
maxLineLen = self.CalcMaxLineLen()
self.sx = ForceBetween(0, self.sx, max(self.sw, maxLineLen - self.sw + 1))
self.cx = ForceBetween(self.sx, self.cx, self.sx + self.sw - 1)
def VertBoundaries(self):
self.SetCharDimensions()
self.sy = ForceBetween(0, self.sy, max(self.sh, self.LinesInFile() - self.sh + 1))
self.cy = ForceBetween(self.sy, self.cy, self.sy + self.sh - 1)
def cVert(self, num):
self.cy = self.cy + num
self.cy = ForceBetween(0, self.cy, self.LinesInFile() - 1)
self.sy = ForceBetween(self.cy - self.sh + 1, self.sy, self.cy)
self.cx = min(self.cx, self.CurrentLineLength())
def cHoriz(self, num):
self.cx = self.cx + num
self.cx = ForceBetween(0, self.cx, self.CurrentLineLength())
self.sx = ForceBetween(self.cx - self.sw + 1, self.sx, self.cx)
def AboveScreen(self, row):
return row < self.sy
def BelowScreen(self, row):
return row >= self.sy + self.sh
def LeftOfScreen(self, col):
return col < self.sx
def RightOfScreen(self, col):
return col >= self.sx + self.sw
##----------------- data structure helper functions
def GetText(self):
return self.lines
def SetText(self, lines):
self.InitCoords()
self.lines = lines
self.UnTouchBuffer()
self.SelectOff()
self.AdjustScrollbars()
self.UpdateView(None)
def IsLine(self, lineNum):
return (0<=lineNum) and (lineNum<self.LinesInFile())
def GetTextLine(self, lineNum):
if self.IsLine(lineNum):
return self.lines[lineNum]
return ""
def SetTextLine(self, lineNum, text):
if self.IsLine(lineNum):
self.lines[lineNum] = text
def CurrentLineLength(self):
return len(self.lines[self.cy])
def LinesInFile(self):
return len(self.lines)
def UnTouchBuffer(self):
self.bufferTouched = False
def BufferWasTouched(self):
return self.bufferTouched
def TouchBuffer(self):
self.bufferTouched = True
##-------------------------- Mouse scroll timing functions
def InitScrolling(self):
# we don't rely on the windows system to scroll for us; we just
# redraw the screen manually every time
self.EnableScrolling(False, False)
self.nextScrollTime = 0
self.SCROLLDELAY = 0.050 # seconds
self.scrollTimer = wx.Timer(self)
self.scroller = Scroller(self)
def CanScroll(self):
if time.time() > self.nextScrollTime:
self.nextScrollTime = time.time() + self.SCROLLDELAY
return True
else:
return False
def SetScrollTimer(self):
oneShot = True
self.scrollTimer.Start(1000*self.SCROLLDELAY/2, oneShot)
self.Bind(wx.EVT_TIMER, self.OnTimer)
def OnTimer(self, event):
screenX, screenY = wx.GetMousePosition()
x, y = self.ScreenToClientXY(screenX, screenY)
self.MouseToRow(y)
self.MouseToCol(x)
self.SelectUpdate()
##-------------------------- Mouse off screen functions
def HandleAboveScreen(self, row):
self.SetScrollTimer()
if self.CanScroll():
row = self.sy - 1
row = max(0, row)
self.cy = row
def HandleBelowScreen(self, row):
self.SetScrollTimer()
if self.CanScroll():
row = self.sy + self.sh
row = min(row, self.LinesInFile() - 1)
self.cy = row
def HandleLeftOfScreen(self, col):
self.SetScrollTimer()
if self.CanScroll():
col = self.sx - 1
col = max(0,col)
self.cx = col
def HandleRightOfScreen(self, col):
self.SetScrollTimer()
if self.CanScroll():
col = self.sx + self.sw
col = min(col, self.CurrentLineLength())
self.cx = col
##------------------------ mousing functions
def MouseToRow(self, mouseY):
row = self.sy + (mouseY/ self.fh)
if self.AboveScreen(row):
self.HandleAboveScreen(row)
elif self.BelowScreen(row):
self.HandleBelowScreen(row)
else:
self.cy = min(row, self.LinesInFile() - 1)
def MouseToCol(self, mouseX):
col = self.sx + (mouseX / self.fw)
if self.LeftOfScreen(col):
self.HandleLeftOfScreen(col)
elif self.RightOfScreen(col):
self.HandleRightOfScreen(col)
else:
self.cx = min(col, self.CurrentLineLength())
def MouseToCursor(self, event):
self.MouseToRow(event.GetY())
self.MouseToCol(event.GetX())
def OnMotion(self, event):
if event.LeftIsDown() and self.HasCapture():
self.Selecting = True
self.MouseToCursor(event)
self.SelectUpdate()
def OnLeftDown(self, event):
self.MouseToCursor(event)
self.SelectBegin = (self.cy, self.cx)
self.SelectEnd = None
self.UpdateView()
self.CaptureMouse()
self.SetFocus()
def OnLeftUp(self, event):
if not self.HasCapture():
return
if self.SelectEnd is None:
self.OnClick()
else:
self.Selecting = False
self.SelectNotify(False, self.SelectBegin, self.SelectEnd)
self.ReleaseMouse()
self.scrollTimer.Stop()
#------------------------- Scrolling
def HorizScroll(self, event, eventType):
maxLineLen = self.CalcMaxLineLen()
if eventType == wx.EVT_SCROLLWIN_LINEUP:
self.sx -= 1
elif eventType == wx.EVT_SCROLLWIN_LINEDOWN:
self.sx += 1
elif eventType == wx.EVT_SCROLLWIN_PAGEUP:
self.sx -= self.sw
elif eventType == wx.EVT_SCROLLWIN_PAGEDOWN:
self.sx += self.sw
elif eventType == wx.EVT_SCROLLWIN_TOP:
self.sx = self.cx = 0
elif eventType == wx.EVT_SCROLLWIN_BOTTOM:
self.sx = maxLineLen - self.sw
self.cx = maxLineLen
else:
self.sx = event.GetPosition()
self.HorizBoundaries()
def VertScroll(self, event, eventType):
if eventType == wx.EVT_SCROLLWIN_LINEUP:
self.sy -= 1
elif eventType == wx.EVT_SCROLLWIN_LINEDOWN:
self.sy += 1
elif eventType == wx.EVT_SCROLLWIN_PAGEUP:
self.sy -= self.sh
elif eventType == wx.EVT_SCROLLWIN_PAGEDOWN:
self.sy += self.sh
elif eventType == wx.EVT_SCROLLWIN_TOP:
self.sy = self.cy = 0
elif eventType == wx.EVT_SCROLLWIN_BOTTOM:
self.sy = self.LinesInFile() - self.sh
self.cy = self.LinesInFile()
else:
self.sy = event.GetPosition()
self.VertBoundaries()
def OnScroll(self, event):
dir = event.GetOrientation()
eventType = event.GetEventType()
if dir == wx.HORIZONTAL:
self.HorizScroll(event, eventType)
else:
self.VertScroll(event, eventType)
self.UpdateView()
def AdjustScrollbars(self):
if self:
for i in range(2):
self.SetCharDimensions()
self.scroller.SetScrollbars(
self.fw, self.fh,
self.CalcMaxLineLen()+3, max(self.LinesInFile()+1, self.sh),
self.sx, self.sy)
#------------ backspace, delete, return
def BreakLine(self, event):
if self.IsLine(self.cy):
t = self.lines[self.cy]
self.lines = self.lines[:self.cy] + [t[:self.cx],t[self.cx:]] + self.lines[self.cy+1:]
self.cVert(1)
self.cx = 0
self.TouchBuffer()
def InsertChar(self,char):
if self.IsLine(self.cy):
t = self.lines[self.cy]
t = t[:self.cx] + char + t[self.cx:]
self.SetTextLine(self.cy, t)
self.cHoriz(1)
self.TouchBuffer()
def JoinLines(self):
t1 = self.lines[self.cy]
t2 = self.lines[self.cy+1]
self.cx = len(t1)
self.lines = self.lines[:self.cy] + [t1 + t2] + self.lines[self.cy+2:]
self.TouchBuffer()
def DeleteChar(self,x,y,oldtext):
newtext = oldtext[:x] + oldtext[x+1:]
self.SetTextLine(y, newtext)
self.TouchBuffer()
def BackSpace(self, event):
t = self.GetTextLine(self.cy)
if self.cx>0:
self.DeleteChar(self.cx-1,self.cy,t)
self.cHoriz(-1)
self.TouchBuffer()
elif self.cx == 0:
if self.cy > 0:
self.cy -= 1
self.JoinLines()
self.TouchBuffer()
else:
wx.Bell()
def Delete(self, event):
t = self.GetTextLine(self.cy)
if self.cx<len(t):
self.DeleteChar(self.cx,self.cy,t)
self.TouchBuffer()
else:
if self.cy < len(self.lines) - 1:
self.JoinLines()
self.TouchBuffer()
def Escape(self, event):
self.SelectOff()
def TabKey(self, event):
numSpaces = self.SpacesPerTab - (self.cx % self.SpacesPerTab)
self.SingleLineInsert(' ' * numSpaces)
##----------- selection routines
def SelectUpdate(self):
self.SelectEnd = (self.cy, self.cx)
self.SelectNotify(self.Selecting, self.SelectBegin, self.SelectEnd)
self.UpdateView()
def NormalizedSelect(self):
(begin, end) = (self.SelectBegin, self.SelectEnd)
(bRow, bCol) = begin
(eRow, eCol) = end
if (bRow < eRow):
return (begin, end)
elif (eRow < bRow):
return (end, begin)
else:
if (bCol < eCol):
return (begin, end)
else:
return (end, begin)
def FindSelection(self):
if self.SelectEnd is None or self.SelectBegin is None:
wx.Bell()
return None
(begin, end) = self.NormalizedSelect()
(bRow, bCol) = begin
(eRow, eCol) = end
return (bRow, bCol, eRow, eCol)
def SelectOff(self):
self.SelectBegin = None
self.SelectEnd = None
self.Selecting = False
self.SelectNotify(False,None,None)
def CopySelection(self, event):
selection = self.FindSelection()
if selection is None:
return
(bRow, bCol, eRow, eCol) = selection
if bRow == eRow:
self.SingleLineCopy(bRow, bCol, eCol)
else:
self.MultipleLineCopy(bRow, bCol, eRow, eCol)
def OnCopySelection(self, event):
self.CopySelection(event)
self.SelectOff()
def CopyToClipboard(self, linesOfText):
do = wx.TextDataObject()
do.SetText(os.linesep.join(linesOfText))
wx.TheClipboard.Open()
wx.TheClipboard.SetData(do)
wx.TheClipboard.Close()
def SingleLineCopy(self, Row, bCol, eCol):
Line = self.GetTextLine(Row)
self.CopyToClipboard([Line[bCol:eCol]])
def MultipleLineCopy(self, bRow, bCol, eRow, eCol):
bLine = self.GetTextLine(bRow)[bCol:]
eLine = self.GetTextLine(eRow)[:eCol]
self.CopyToClipboard([bLine] + [l for l in self.lines[bRow + 1:eRow]] + [eLine])
def OnDeleteSelection(self, event):
selection = self.FindSelection()
if selection is None:
return
(bRow, bCol, eRow, eCol) = selection
if bRow == eRow:
self.SingleLineDelete(bRow, bCol, eCol)
else:
self.MultipleLineDelete(bRow, bCol, eRow, eCol)
self.TouchBuffer()
self.cy = bRow
self.cx = bCol
self.SelectOff()
self.UpdateView()
def SingleLineDelete(self, Row, bCol, eCol):
ModLine = self.GetTextLine(Row)
ModLine = ModLine[:bCol] + ModLine[eCol:]
self.SetTextLine(Row,ModLine)
def MultipleLineDelete(self, bRow, bCol, eRow, eCol):
bLine = self.GetTextLine(bRow)
eLine = self.GetTextLine(eRow)
ModLine = bLine[:bCol] + eLine[eCol:]
self.lines[bRow:eRow + 1] = [ModLine]
def OnPaste(self, event):
do = wx.TextDataObject()
wx.TheClipboard.Open()
success = wx.TheClipboard.GetData(do)
wx.TheClipboard.Close()
if success:
pastedLines = LineSplitter(do.GetText())
else:
wx.Bell()
return
if len(pastedLines) == 0:
wx.Bell()
return
elif len(pastedLines) == 1:
self.SingleLineInsert(pastedLines[0])
else:
self.MultipleLinePaste(pastedLines)
def SingleLineInsert(self, newText):
ModLine = self.GetTextLine(self.cy)
ModLine = ModLine[:self.cx] + newText + ModLine[self.cx:]
self.SetTextLine(self.cy, ModLine)
self.cHoriz(len(newText))
self.TouchBuffer()
self.UpdateView()
def MultipleLinePaste(self, pastedLines):
FirstLine = LastLine = self.GetTextLine(self.cy)
FirstLine = FirstLine[:self.cx] + pastedLines[0]
LastLine = pastedLines[-1] + LastLine[self.cx:]
NewSlice = [FirstLine]
NewSlice += [l for l in pastedLines[1:-1]]
NewSlice += [LastLine]
self.lines[self.cy:self.cy + 1] = NewSlice
self.cy = self.cy + len(pastedLines)-1
self.cx = len(pastedLines[-1])
self.TouchBuffer()
self.UpdateView()
def OnCutSelection(self,event):
self.CopySelection(event)
self.OnDeleteSelection(event)
#-------------- Keyboard movement implementations
def MoveDown(self, event):
self.cVert(+1)
def MoveUp(self, event):
self.cVert(-1)
def MoveLeft(self, event):
if self.cx == 0:
if self.cy == 0:
wx.Bell()
else:
self.cVert(-1)
self.cx = self.CurrentLineLength()
else:
self.cx -= 1
def MoveRight(self, event):
linelen = self.CurrentLineLength()
if self.cx == linelen:
if self.cy == len(self.lines) - 1:
wx.Bell()
else:
self.cx = 0
self.cVert(1)
else:
self.cx += 1
def MovePageDown(self, event):
self.cVert(self.sh)
def MovePageUp(self, event):
self.cVert(-self.sh)
def MoveHome(self, event):
self.cx = 0
def MoveEnd(self, event):
self.cx = self.CurrentLineLength()
def MoveStartOfFile(self, event):
self.cy = 0
self.cx = 0
def MoveEndOfFile(self, event):
self.cy = len(self.lines) - 1
self.cx = self.CurrentLineLength()
#-------------- Key handler mapping tables
def SetMoveSpecialFuncs(self, action):
action[wx.WXK_DOWN] = self.MoveDown
action[wx.WXK_UP] = self.MoveUp
action[wx.WXK_LEFT] = self.MoveLeft
action[wx.WXK_RIGHT] = self.MoveRight
action[wx.WXK_NEXT] = self.MovePageDown
action[wx.WXK_PRIOR] = self.MovePageUp
action[wx.WXK_HOME] = self.MoveHome
action[wx.WXK_END] = self.MoveEnd
def SetMoveSpecialControlFuncs(self, action):
action[wx.WXK_HOME] = self.MoveStartOfFile
action[wx.WXK_END] = self.MoveEndOfFile
def SetAltFuncs(self, action):
# subclass implements
pass
def SetControlFuncs(self, action):
action['c'] = self.OnCopySelection
action['d'] = self.OnDeleteSelection
action['v'] = self.OnPaste
action['x'] = self.OnCutSelection
def SetSpecialControlFuncs(self, action):
action[wx.WXK_INSERT] = self.OnCopySelection
def SetShiftFuncs(self, action):
action[wx.WXK_DELETE] = self.OnCutSelection
action[wx.WXK_INSERT] = self.OnPaste
def SetSpecialFuncs(self, action):
action[wx.WXK_BACK] = self.BackSpace
action[wx.WXK_DELETE] = self.Delete
action[wx.WXK_RETURN] = self.BreakLine
action[wx.WXK_ESCAPE] = self.Escape
action[wx.WXK_TAB] = self.TabKey
##-------------- Logic for key handlers
def Move(self, keySettingFunction, key, event):
action = {}
keySettingFunction(action)
if not action.has_key(key):
return False
if event.ShiftDown():
if not self.Selecting:
self.Selecting = True
self.SelectBegin = (self.cy, self.cx)
action[key](event)
self.SelectEnd = (self.cy, self.cx)
else:
action[key](event)
if self.Selecting:
self.Selecting = False
self.SelectNotify(self.Selecting, self.SelectBegin, self.SelectEnd)
self.UpdateView()
return True
def MoveSpecialKey(self, event, key):
return self.Move(self.SetMoveSpecialFuncs, key, event)
def MoveSpecialControlKey(self, event, key):
if not event.ControlDown():
return False
return self.Move(self.SetMoveSpecialControlFuncs, key, event)
def Dispatch(self, keySettingFunction, key, event):
action = {}
keySettingFunction(action)
if action.has_key(key):
action[key](event)
self.UpdateView()
return True
return False
def ModifierKey(self, key, event, modifierKeyDown, MappingFunc):
if not modifierKeyDown:
return False
key = self.UnixKeyHack(key)
try:
key = chr(key)
except:
return False
if not self.Dispatch(MappingFunc, key, event):
wx.Bell()
return True
def ControlKey(self, event, key):
return self.ModifierKey(key, event, event.ControlDown(), self.SetControlFuncs)
def AltKey(self, event, key):
return self.ModifierKey(key, event, event.AltDown(), self.SetAltFuncs)
def SpecialControlKey(self, event, key):
if not event.ControlDown():
return False
if not self.Dispatch(self.SetSpecialControlFuncs, key, event):
wx.Bell()
return True
def ShiftKey(self, event, key):
if not event.ShiftDown():
return False
return self.Dispatch(self.SetShiftFuncs, key, event)
def NormalChar(self, event, key):
self.SelectOff()
# regular ascii
if not self.Dispatch(self.SetSpecialFuncs, key, event):
if (key>31) and (key<256):
self.InsertChar(chr(key))
else:
wx.Bell()
return
self.UpdateView()
self.AdjustScrollbars()
def OnChar(self, event):
key = event.GetKeyCode()
filters = [self.AltKey,
self.MoveSpecialControlKey,
self.ControlKey,
self.SpecialControlKey,
self.MoveSpecialKey,
self.ShiftKey,
self.NormalChar]
for filter in filters:
if filter(event,key):
break
return 0
#----------------------- Eliminate memory leaks
def OnDestroy(self, event):
self.mdc = None
self.odc = None
self.bgColor = None
self.fgColor = None
self.font = None
self.selectColor = None
self.scrollTimer = None
self.eofMarker = None
#-------------------- Abstract methods for subclasses
def OnClick(self):
pass
def SelectNotify(self, Selecting, SelectionBegin, SelectionEnd):
pass
|
py | 7df86d8730019f216995c529e6360434759fb2bf | import os
import functools
import yaml
import numpy as np
import math
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import namedtuple
class MyDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super(MyDumper, self).increase_indent(flow, False)
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
PRIMITIVES = [
'none',
'noise',
'max_pool_3x3',
'avg_pool_3x3',
'skip_connect',
'sep_conv_3x3',
'sep_conv_5x5',
'dil_conv_3x3',
'dil_conv_5x5'
]
def singleton(cls, *args, **kw):
instances = dict()
@functools.wraps(cls)
def _fun(*clsargs, **clskw):
if cls not in instances:
instances[cls] = cls(*clsargs, **clskw)
return instances[cls]
_fun.cls = cls # make sure cls can be obtained
return _fun
class EVLocalAvg(object):
def __init__(self, window=5, ev_freq=2, total_epochs=50):
""" Keep track of the eigenvalues local average.
Args:
window (int): number of elements used to compute local average.
Default: 5
ev_freq (int): frequency used to compute eigenvalues. Default:
every 2 epochs
total_epochs (int): total number of epochs that DARTS runs.
Default: 50
"""
self.window = window
self.ev_freq = ev_freq
self.epochs = total_epochs
self.stop_search = False
self.stop_epoch = total_epochs - 1
self.stop_genotype = None
self.ev = []
self.ev_local_avg = []
self.genotypes = {}
self.la_epochs = {}
# start and end index of the local average window
self.la_start_idx = 0
self.la_end_idx = self.window
def reset(self):
self.ev = []
self.ev_local_avg = []
self.genotypes = {}
self.la_epochs = {}
def update(self, epoch, ev, genotype):
""" Method to update the local average list.
Args:
epoch (int): current epoch
ev (float): current dominant eigenvalue
genotype (namedtuple): current genotype
"""
self.ev.append(ev)
self.genotypes.update({epoch: genotype})
# set the stop_genotype to the current genotype in case the early stop
# procedure decides not to early stop
self.stop_genotype = genotype
# since the local average computation starts after the dominant
# eigenvalue in the first epoch is already computed we have to wait
# at least until we have 3 eigenvalues in the list.
if (len(self.ev) >= int(np.ceil(self.window/2))) and (epoch <
self.epochs - 1):
# start sliding the window as soon as the number of eigenvalues in
# the list becomes equal to the window size
if len(self.ev) < self.window:
self.ev_local_avg.append(np.mean(self.ev))
else:
assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window
self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
self.la_end_idx]))
self.la_start_idx += 1
self.la_end_idx += 1
# keep track of the offset between the current epoch and the epoch
# corresponding to the local average. NOTE: in the end the size of
# self.ev and self.ev_local_avg should be equal
self.la_epochs.update({epoch: int(epoch -
int(self.ev_freq*np.floor(self.window/2)))})
elif len(self.ev) < int(np.ceil(self.window/2)):
self.la_epochs.update({epoch: -1})
# since there is an offset between the current epoch and the local
# average epoch, loop in the last epoch to compute the local average of
# these number of elements: window, window - 1, window - 2, ..., ceil(window/2)
elif epoch == self.epochs - 1:
for i in range(int(np.ceil(self.window/2))):
assert len(self.ev[self.la_start_idx: self.la_end_idx]) == self.window - i
self.ev_local_avg.append(np.mean(self.ev[self.la_start_idx:
self.la_end_idx + 1]))
self.la_start_idx += 1
def early_stop(self, epoch, factor=1.3, es_start_epoch=10, delta=4):
""" Early stopping criterion
Args:
epoch (int): current epoch
            factor (float): threshold factor for the ratio between the current
                and previous eigenvalue. Default: 1.3
            es_start_epoch (int): until this epoch do not consider early
                stopping. Default: 10
            delta (int): factor influencing which previous local average we
                consider for early stopping. Default: 4
"""
if int(self.la_epochs[epoch] - self.ev_freq*delta) >= es_start_epoch:
# the current local average corresponds to
# epoch - int(self.ev_freq*np.floor(self.window/2))
current_la = self.ev_local_avg[-1]
# by default take the local average corresponding to epoch
# delta*self.ev_freq
previous_la = self.ev_local_avg[-1 - delta]
self.stop_search = current_la / previous_la > factor
if self.stop_search:
self.stop_epoch = int(self.la_epochs[epoch] - self.ev_freq*delta)
self.stop_genotype = self.genotypes[self.stop_epoch]
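# Intended usage sketch (illustrative; compute_dominant_eigenvalue() and
# current_genotype() are placeholders for whatever the search loop provides):
#
#   la_tracker = EVLocalAvg(window=5, ev_freq=2, total_epochs=50)
#   for epoch in range(50):
#       if epoch % la_tracker.ev_freq == 0:
#           la_tracker.update(epoch, compute_dominant_eigenvalue(), current_genotype())
#           la_tracker.early_stop(epoch)
#       if la_tracker.stop_search:
#           final_genotype = la_tracker.stop_genotype   # architecture at stop_epoch
#           break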
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
@singleton
class DecayScheduler(object):
def __init__(self, base_lr=1.0, last_iter=-1, T_max=50, T_start=0, T_stop=50, decay_type='cosine'):
self.base_lr = base_lr
self.T_max = T_max
self.T_start = T_start
self.T_stop = T_stop
self.cnt = 0
self.decay_type = decay_type
self.decay_rate = 1.0
def step(self, epoch):
if epoch >= self.T_start:
if self.decay_type == "cosine":
self.decay_rate = self.base_lr * (1 + math.cos(math.pi * epoch / (self.T_max - self.T_start))) / 2.0 if epoch <= self.T_stop else self.decay_rate
elif self.decay_type == "slow_cosine":
self.decay_rate = self.base_lr * math.cos((math.pi/2) * epoch / (self.T_max - self.T_start)) if epoch <= self.T_stop else self.decay_rate
elif self.decay_type == "linear":
self.decay_rate = self.base_lr * (self.T_max - epoch) / (self.T_max - self.T_start) if epoch <= self.T_stop else self.decay_rate
else:
self.decay_rate = self.base_lr
else:
self.decay_rate = self.base_lr
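# Usage sketch (illustrative): step() updates decay_rate in place rather than
# returning it, so callers read the attribute after stepping. base_drop_prob is
# a placeholder for the caller's own base value.
#
#   sched = DecayScheduler(base_lr=1.0, T_max=50, T_start=0, T_stop=50, decay_type='linear')
#   for epoch in range(50):
#       sched.step(epoch)
#       drop_prob = base_drop_prob * sched.decay_rate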
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def write_yaml_results_eval(args, results_file, result_to_log):
setting = '_'.join([args.space, args.dataset])
regularization = '_'.join(
[str(args.search_dp), str(args.search_wd)]
)
results_file = os.path.join(args._save, results_file+'.yaml')
try:
with open(results_file, 'r') as f:
result = yaml.load(f)
if setting in result.keys():
if regularization in result[setting].keys():
if args.search_task_id in result[setting][regularization]:
result[setting][regularization][args.search_task_id].append(result_to_log)
else:
result[setting][regularization].update({args.search_task_id:
[result_to_log]})
else:
result[setting].update({regularization: {args.search_task_id:
[result_to_log]}})
else:
result.update({setting: {regularization: {args.search_task_id:
[result_to_log]}}})
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
except (AttributeError, FileNotFoundError) as e:
result = {
setting: {
regularization: {
args.search_task_id: [result_to_log]
}
}
}
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
def write_yaml_results(args, results_file, result_to_log):
setting = '_'.join([args.space, args.dataset])
regularization = '_'.join(
[str(args.drop_path_prob), str(args.weight_decay)]
)
results_file = os.path.join(args._save, results_file+'.yaml')
try:
with open(results_file, 'r') as f:
result = yaml.load(f)
if setting in result.keys():
if regularization in result[setting].keys():
result[setting][regularization].update({args.task_id: result_to_log})
else:
result[setting].update({regularization: {args.task_id: result_to_log}})
else:
result.update({setting: {regularization: {args.task_id: result_to_log}}})
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
except (AttributeError, FileNotFoundError) as e:
result = {
setting: {
regularization: {
args.task_id: result_to_log
}
}
}
with open(results_file, 'w') as f:
yaml.dump(result, f, Dumper=MyDumper, default_flow_style=False)
class Cutout(object):
def __init__(self, length, prob=1.0):
self.length = length
self.prob = prob
def __call__(self, img):
if np.random.binomial(1, self.prob):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_svhn(args):
SVHN_MEAN = [0.4377, 0.4438, 0.4728]
SVHN_STD = [0.1980, 0.2010, 0.1970]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(SVHN_MEAN, SVHN_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(SVHN_MEAN, SVHN_STD),
])
return train_transform, valid_transform
def _data_transforms_cifar100(args):
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.2673, 0.2564, 0.2762]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def _data_transforms_cifar10(args):
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length,
args.cutout_prob))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def count_parameters_in_MB(model):
return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name)/1e6
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def load(model, model_path):
model.load_state_dict(torch.load(model_path))
def save_checkpoint(state, is_best, save, epoch, task_id):
filename = "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
filename = os.path.join(save, filename)
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def load_checkpoint(model, optimizer, scheduler, architect, save, la_tracker,
epoch, task_id):
filename = "checkpoint_{}_{}.pth.tar".format(task_id, epoch)
filename = os.path.join(save, filename)
if not model.args.disable_cuda:
checkpoint = torch.load(filename, map_location="cuda:{}".format(model.args.gpu))
else:
checkpoint = torch.load(filename,map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['state_dict'])
model.alphas_normal.data = checkpoint['alphas_normal']
model.alphas_reduce.data = checkpoint['alphas_reduce']
optimizer.load_state_dict(checkpoint['optimizer'])
architect.optimizer.load_state_dict(checkpoint['arch_optimizer'])
la_tracker.ev = checkpoint['ev']
la_tracker.ev_local_avg = checkpoint['ev_local_avg']
la_tracker.genotypes = checkpoint['genotypes']
la_tracker.la_epochs = checkpoint['la_epochs']
la_tracker.la_start_idx = checkpoint['la_start_idx']
la_tracker.la_end_idx = checkpoint['la_end_idx']
lr = checkpoint['lr']
return lr
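# Illustrative note (not part of the original file): load_checkpoint expects the dict
# written by save_checkpoint to carry 'state_dict', 'alphas_normal', 'alphas_reduce',
# 'optimizer', 'arch_optimizer', 'lr' and the la_tracker fields ('ev', 'ev_local_avg',
# 'genotypes', 'la_epochs', 'la_start_idx', 'la_end_idx'). A matching save call could
# look like the sketch below; the surrounding `args`, `epoch` and `lr` names are assumptions:
#
#   save_checkpoint({'state_dict': model.state_dict(),
#                    'alphas_normal': model.alphas_normal.data,
#                    'alphas_reduce': model.alphas_reduce.data,
#                    'optimizer': optimizer.state_dict(),
#                    'arch_optimizer': architect.optimizer.state_dict(),
#                    'lr': lr, 'ev': la_tracker.ev, 'ev_local_avg': la_tracker.ev_local_avg,
#                    'genotypes': la_tracker.genotypes, 'la_epochs': la_tracker.la_epochs,
#                    'la_start_idx': la_tracker.la_start_idx, 'la_end_idx': la_tracker.la_end_idx},
#                   is_best=False, save=args.save, epoch=epoch, task_id=args.task_id)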
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1.-drop_prob
mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
x.div_(keep_prob)
x.mul_(mask)
return x
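# Illustrative note (not part of the original file): drop_path applies stochastic depth
# with inverted scaling -- whole samples in the batch are zeroed with probability
# `drop_prob`, and the survivors are divided by keep_prob so the expected activation
# is unchanged. A sketch (requires CUDA, since the mask is a torch.cuda.FloatTensor):
#
#   x = torch.ones(4, 8, 1, 1).cuda()
#   y = drop_path(x, drop_prob=0.5)   # each sample is either all zeros or scaled to 2.0
#
# The tensor shape above is an arbitrary example; note that x is modified in place.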
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def print_args(args):
for arg, val in args.__dict__.items():
print(arg + '.' * (50 - len(arg) - len(str(val))) + str(val))
print()
def get_one_hot(alphas):
start = 0
n = 2
one_hot = torch.zeros(alphas.shape)
for i in range(4):
end = start + n
w = torch.nn.functional.softmax(alphas[start:end],
dim=-1).data.cpu().numpy().copy()
edges = sorted(range(i+2), key=lambda x: -max(w[x][k] for k in range(len(w[x]))))[:2]
for j in edges:
k_best = None
for k in range(len(w[j])):
if k_best is None or w[j][k] > w[j][k_best]:
k_best = k
one_hot[start+j][k_best] = 1
start = end
n += 1
return one_hot
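# Illustrative note (not part of the original file): get_one_hot discretizes a
# DARTS-style alpha tensor. For each of the 4 intermediate nodes it softmaxes the
# rows of that node's candidate input edges (2, 3, 4 and 5 rows in turn, 14 in total),
# keeps the 2 edges whose strongest operation has the largest weight, and sets a
# single 1 at that operation. Assuming `alphas` is the (14, num_ops) architecture
# parameter used elsewhere in this file (e.g. model.alphas_normal):
#
#   discrete = get_one_hot(model.alphas_normal)   # same shape, exactly 8 entries equal 1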
|
py | 7df86da37a8d934698f2612a1c25779fe7e4eeee | #!venv/bin/python
"""
Author: Bryce Drew
"""
import os
import sys
import subprocess
sys.path.append(os.getcwd()+"/venv/lib/python2.7/site-packages")
import superphy
from superphy.shared import config
config.import_env()
#Debug allows the execution of arbitrary code. Do not use it in production.
def run():
"""
    This compiles files in the project and restarts the necessary systems.
    After this is run, if you have installed properly, you will have a working superphy.
"""
os.system("bash superphy/database/scripts/start.sh")
os.system("cd app/SuperPhy/static; bash compile.sh; cd ../../..")
os.system("sudo /etc/init.d/apache2 reload")
exit()
def install():
"""
    Starts the database and runs the superphy install configuration.
"""
os.system("bash superphy/database/scripts/start.sh")
config.install()
exit()
def uploader():
"""
    Runs the superphy upload routine.
"""
superphy.upload.foo()
exit()
def shell():
"""
    Opens an interactive Python shell with the current globals and locals.
"""
import code
code.interact(local=dict(globals(), **locals()))
def test():
"""
    Starts the test database, initializes the upload data and runs nosetests for each module under superphy/src.
"""
config.start_database("testing")
from superphy.upload import main as upload
upload.init()
subprocess.call(
"for f in superphy/src/*; do echo $f; nosetests $f -vv --exe; done",
shell=True
)
OPTIONS = {
"install" : install,
"run" : run,
"upload" : uploader,
"sparql" : superphy.shared.config.start_database,
"shell" : shell,
"test" : test
}
if __name__ == '__main__':
if len(sys.argv) >= 2:
if sys.argv[1] in OPTIONS:
OPTIONS[sys.argv[1]]()
else:
print "OPTIONS:"
for key in OPTIONS:
print key
else:
#Default
OPTIONS['run']()
|
py | 7df86e5dd3a9c6e60d18f403c582d2bea4b788d8 | import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="ticklen", parent_name="barpolar.marker.colorbar", **kwargs
):
super(TicklenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
py | 7df86e9ae26efa563446c6b8af94e0817eba7166 | # pylint: skip-file
import numpy as np
from absl.testing import parameterized
from tensorflow.python.keras import keras_parameterized
from tensorflow.keras.backend import floatx
import tensorflow as tf
from ..test_utils import layer_test, check_attributes
from . import GenericBlock, TrendBlock, SeasonalityBlock, BaseBlock
class ExampleBlock(BaseBlock):
def __init__(
self,
label_width: int,
n_neurons: int,
drop_rate: float,
g_trainable: bool = False,
interpretable: bool = False,
block_type: str = "BaseBlock",
):
super().__init__(
label_width=label_width,
n_neurons=n_neurons,
drop_rate=drop_rate,
g_trainable=g_trainable,
interpretable=interpretable,
block_type=block_type,
)
def coefficient_factory(self, *args: list, **kwargs: dict):
pass
def get_coefficients(self, output_last_dim: int, branch_name: str):
return tf.constant([0, 0, 0])
@keras_parameterized.run_all_keras_modes
class BlocksLayersTest(tf.test.TestCase, parameterized.TestCase):
"""
Unit tests for the nbeats model.
"""
@parameterized.parameters(
[ # BaseBlock attributes test
(
(10, 16, 0.5),
BaseBlock,
[
"label_width",
"drop_rate",
"is_interpretable",
"is_g_trainable",
"block_type",
],
[10, 0.5, False, False, "BaseBlock"],
),
# TrendBlock attributes test
(
(1, 2, 3, 0.0),
TrendBlock,
[
"label_width",
"p_degree",
"drop_rate",
"is_interpretable",
"is_g_trainable",
"block_type",
],
[1, 2, 0.0, True, False, "TrendBlock"],
),
# SeasonalityBlock attributes test
(
(10, [10], [10], [10], [10], 16, 0.5),
SeasonalityBlock,
[
"label_width",
"forecast_periods",
"backcast_periods",
"forecast_fourier_order",
"backcast_fourier_order",
"drop_rate",
"is_interpretable",
"is_g_trainable",
"block_type",
],
[10, [10], [10], [10], [10], 0.5, True, False, "SeasonalityBlock"],
),
# GenericBlock attributes test
(
(10, 10, 10, 16, 0.5),
GenericBlock,
[
"label_width",
"g_forecast_neurons",
"g_backcast_neurons",
"drop_rate",
"is_interpretable",
"is_g_trainable",
"block_type",
],
[10, 10, 10, 0.5, False, True, "GenericBlock"],
),
]
)
def test_attributes_and_property(self, args, cls, attributes, expected_values):
if cls == BaseBlock:
block = ExampleBlock(*args)
else:
block = cls(*args)
check_attributes(self, block, attributes, expected_values)
@parameterized.parameters(
[ # BaseBlock attributes test
(
(10, None, None, None, None, 16, 0.5),
SeasonalityBlock,
[
"label_width",
"forecast_periods",
"backcast_periods",
"forecast_fourier_order",
"backcast_fourier_order",
"drop_rate",
"is_interpretable",
"is_g_trainable",
"block_type",
],
[10, 5, 2, 5, 2, 0.5, True, False, "SeasonalityBlock"],
),
]
)
def test_attributes_and_property_after_build(
self, args, cls, attributes, expected_values
):
block = cls(*args)
block(tf.constant([[0.0, 0.0, 0.0, 0.0]]))
check_attributes(self, block, attributes, expected_values)
@parameterized.parameters(
[
(
(-10, 16, 0.5),
BaseBlock,
"`label_width` or its elements has to be greater or equal",
),
(
(10, -10, 10, 16, 0.5),
GenericBlock,
"`g_forecast_neurons` or its elements has to be greater or equal",
),
(
(10, 10, -10, 16, 0.5),
GenericBlock,
"`g_backcast_neurons` or its elements has to be greater or equal",
),
(
(10, -16, 0.5),
BaseBlock,
"`n_neurons` or its elements has to be greater or equal",
),
(
(10, 16, -0.5),
BaseBlock,
"`drop_rate` or its elements has to be between",
),
(
(10, 16, 0.5, False, False, "base"),
BaseBlock,
"`name` has to contain `Block`",
),
(
(10, -10, 16, 0.5),
TrendBlock,
"`p_degree` or its elements has to be greater or equal",
),
(
(10, [10, 10], [10], [10], [10], 16, 0.5),
SeasonalityBlock,
"`forecast_periods` and `forecast_fourier_order` are expected to have the same length",
),
(
(10, [10], [10, 10], [10], [10], 16, 0.5),
SeasonalityBlock,
"`backcast_periods` and `backcast_fourier_order` are expected to have the same length",
),
(
(10, [-10], [10], [10], [10], 16, 0.5),
SeasonalityBlock,
"`forecast_periods` or its elements has to be greater or equal",
),
(
(10, [10], [-10], [10], [10], 16, 0.5),
SeasonalityBlock,
"`backcast_periods` or its elements has to be greater or equal",
),
]
)
def test_raises_error(self, args, cls, error):
with self.assertRaisesRegexp(ValueError, error):
with self.assertRaisesRegexp(AssertionError, error):
if cls == BaseBlock:
obj = ExampleBlock(*args)
else:
obj = cls(*args)
obj.build(tf.TensorShape((None, args[0])))
raise ValueError(error)
@parameterized.parameters(
[(1, 2, 1, 3, 0.0), (1, 2, 1, 3, 0.01),]
)
def test_trendblock(self, label_width, input_width, p_degree, n_neurons, drop_rate):
trend_weights = [
np.zeros(shape=(input_width, n_neurons)),
np.ones(shape=(1, n_neurons)),
np.zeros(shape=(n_neurons, n_neurons)),
np.ones(shape=(1, n_neurons,)),
np.zeros(shape=(n_neurons, n_neurons)),
np.ones(shape=(1, n_neurons,)),
np.zeros(shape=(n_neurons, n_neurons)),
np.ones(shape=(1, n_neurons,)),
np.ones(shape=(n_neurons, p_degree + 1)),
np.ones(shape=(n_neurons, p_degree + 1)),
np.array([[1.0], [0.0]]),
np.array([[1.0, 1.0], [0.0, 0.5]]),
]
if drop_rate == 0.0:
layer_test(
TrendBlock,
kwargs={
"label_width": label_width,
"p_degree": p_degree,
"n_neurons": n_neurons,
"drop_rate": drop_rate,
"weights": trend_weights,
},
input_dtype=floatx(),
input_shape=(2, 2),
expected_output_shape=(
tf.TensorShape((None, 2)),
tf.TensorShape((None, 1)),
),
expected_output_dtype=[floatx(), floatx()],
expected_output=[
tf.constant([3.0, 4.5, 3.0, 4.5], shape=(2, 2)),
tf.constant(3.0, shape=(2, 1)),
],
custom_objects={"TrendBlock": TrendBlock},
)
elif drop_rate > 0:
model = TrendBlock(
label_width=label_width,
p_degree=p_degree,
n_neurons=30,
drop_rate=drop_rate,
)
actual_1 = model(tf.constant([[1.0, 8.0], [1.0, 2.0]]))
actual_2 = model(tf.constant([[1.0, 8.0], [1.0, 2.0]]))
np.testing.assert_raises(
AssertionError, np.testing.assert_array_equal, actual_1[1], actual_2[1]
)
@parameterized.parameters([(2, 3, [2], [3], [2], [3], 3, 0.0)])
def test_seasonalityblock(
self,
label_width,
input_width,
forecast_periods,
backcast_periods,
forecast_fourier_order,
backcast_fourier_order,
n_neurons,
drop_rate,
):
g_forecast_neurons = tf.reduce_sum(2 * forecast_periods)
g_backcast_neurons = tf.reduce_sum(2 * backcast_periods)
seasonality_weights = [
np.zeros(shape=(input_width, n_neurons)),
np.ones(shape=(1, n_neurons,)),
np.zeros(shape=(n_neurons, n_neurons)),
np.ones(shape=(1, n_neurons,)),
np.zeros(shape=(n_neurons, n_neurons)),
np.ones(shape=(1, n_neurons,)),
np.zeros(shape=(n_neurons, n_neurons)),
np.ones(shape=(1, n_neurons,)),
np.ones(shape=(n_neurons, g_forecast_neurons)),
np.ones(shape=(n_neurons, g_backcast_neurons)),
np.array([[1, 1], [1, tf.cos(np.pi)], [0, 0], [0, tf.sin(np.pi)]]),
np.array(
[
[1, 1, 1],
[
1,
tf.cos(1 * (1 / 3) * 2 * np.pi),
tf.cos(1 * (2 / 3) * 2 * np.pi),
],
[
1,
tf.cos(2 * (1 / 3) * 2 * np.pi),
tf.cos(2 * (2 / 3) * 2 * np.pi),
],
[0, 0, 0],
[
0,
tf.sin(1 * (1 / 3) * 2 * np.pi),
tf.sin(1 * (2 / 3) * 2 * np.pi),
],
[
0,
tf.sin(2 * (1 / 3) * 2 * np.pi),
tf.sin(2 * (2 / 3) * 2 * np.pi),
],
]
),
]
y1 = (
3
* (
1
+ tf.cos(1 * (1 / 3) * 2 * np.pi)
+ tf.cos(2 * (1 / 3) * 2 * np.pi)
+ tf.sin(1 * (1 / 3) * 2 * np.pi)
+ tf.sin(2 * (1 / 3) * 2 * np.pi)
).numpy()
)
y2 = (
3
* (
1
+ tf.cos(1 * (2 / 3) * 2 * np.pi)
+ tf.cos(2 * (2 / 3) * 2 * np.pi)
+ tf.sin(1 * (2 / 3) * 2 * np.pi)
+ tf.sin(2 * (2 / 3) * 2 * np.pi)
).numpy()
)
layer_test(
SeasonalityBlock,
kwargs={
"label_width": label_width,
"forecast_periods": forecast_periods,
"backcast_periods": backcast_periods,
"forecast_fourier_order": forecast_fourier_order,
"backcast_fourier_order": backcast_fourier_order,
"n_neurons": n_neurons,
"drop_rate": drop_rate,
"weights": seasonality_weights,
},
input_dtype=floatx(),
input_shape=(2, 3),
expected_output_shape=(
tf.TensorShape((None, 3)),
tf.TensorShape((None, 2)),
),
expected_output_dtype=[floatx(), floatx()],
expected_output=[
tf.constant([9.0, y1, y2, 9.0, y1, y2], shape=(2, 3)),
tf.constant([6.0, 0.0, 6.0, 0.0], shape=(2, 2)),
],
custom_objects={"SeasonalityBlock": SeasonalityBlock},
)
forecast_periods = [1, 2]
backcast_periods = [2, 3]
forecast_fourier_order = [1, 2]
backcast_fourier_order = [2, 3]
g_forecast_neurons = tf.reduce_sum(2 * forecast_periods)
g_backcast_neurons = tf.reduce_sum(2 * backcast_periods)
seasonality_weights[-4:] = [
np.ones(shape=(n_neurons, g_forecast_neurons)),
np.ones(shape=(n_neurons, g_backcast_neurons)),
np.array(
[[1, 1], [0, 0], [1, 1], [1, tf.cos(np.pi)], [0, 0], [0, tf.sin(np.pi)]]
),
np.array(
[
[1, 1, 1],
[
1,
tf.cos(1 * (1 / 2) * 2 * np.pi),
tf.cos(1 * (2 / 2) * 2 * np.pi),
],
[0, 0, 0],
[
0,
tf.sin(1 * (1 / 2) * 2 * np.pi),
tf.sin(1 * (2 / 2) * 2 * np.pi),
],
[1, 1, 1],
[
1,
tf.cos(1 * (1 / 3) * 2 * np.pi),
tf.cos(1 * (2 / 3) * 2 * np.pi),
],
[
1,
tf.cos(2 * (1 / 3) * 2 * np.pi),
tf.cos(2 * (2 / 3) * 2 * np.pi),
],
[0, 0, 0],
[
0,
tf.sin(1 * (1 / 3) * 2 * np.pi),
tf.sin(1 * (2 / 3) * 2 * np.pi),
],
[
0,
tf.sin(2 * (1 / 3) * 2 * np.pi),
tf.sin(2 * (2 / 3) * 2 * np.pi),
],
]
),
]
y1 = (
3
* (
2
+ tf.cos(1 * (1 / 2) * 2 * np.pi)
+ tf.sin(1 * (1 / 2) * 2 * np.pi)
+ tf.cos(1 * (1 / 3) * 2 * np.pi)
+ tf.cos(2 * (1 / 3) * 2 * np.pi)
+ tf.sin(1 * (1 / 3) * 2 * np.pi)
+ tf.sin(2 * (1 / 3) * 2 * np.pi)
).numpy()
)
y2 = (
3
* (
2
+ tf.cos(1 * (2 / 2) * 2 * np.pi)
+ tf.sin(1 * (2 / 2) * 2 * np.pi)
+ tf.cos(1 * (2 / 3) * 2 * np.pi)
+ tf.cos(2 * (2 / 3) * 2 * np.pi)
+ tf.sin(1 * (2 / 3) * 2 * np.pi)
+ tf.sin(2 * (2 / 3) * 2 * np.pi)
).numpy()
)
layer_test(
SeasonalityBlock,
kwargs={
"label_width": label_width,
"forecast_periods": forecast_periods,
"backcast_periods": backcast_periods,
"forecast_fourier_order": forecast_fourier_order,
"backcast_fourier_order": backcast_fourier_order,
"n_neurons": n_neurons,
"drop_rate": drop_rate,
"weights": seasonality_weights,
},
input_dtype=floatx(),
input_shape=(2, 3),
expected_output_shape=(
tf.TensorShape((None, 3)),
tf.TensorShape((None, 2)),
),
expected_output_dtype=[floatx(), floatx()],
expected_output=[
tf.constant([15.0, y1, y2, 15.0, y1, y2], shape=(2, 3)),
tf.constant([9.0, 3.0, 9.0, 3.0], shape=(2, 2)),
],
custom_objects={"SeasonalityBlock": SeasonalityBlock},
)
@parameterized.parameters([(1, 5, 5, 3, 0.0)])
def test_genericblock(
self, label_width, g_forecast_neurons, g_backcast_neurons, n_neurons, drop_rate,
):
layer_test(
GenericBlock,
kwargs={
"label_width": label_width,
"g_forecast_neurons": g_forecast_neurons,
"g_backcast_neurons": g_backcast_neurons,
"n_neurons": n_neurons,
"drop_rate": drop_rate,
},
input_dtype=floatx(),
input_shape=(2, 2),
expected_output_shape=(
tf.TensorShape((None, 2)),
tf.TensorShape((None, 1)),
),
expected_output_dtype=[floatx(), floatx()],
expected_output=None,
custom_objects={"GenericBlock": GenericBlock},
)
|
py | 7df86f1f293188954036b87dd739764070251ebd | from electrum.plugin import hook
from electrum.util import print_msg, raw_input, print_stderr
from electrum.logging import get_logger
from ..hw_wallet.cmdline import CmdLineHandler
from .coldcard import ColdcardPlugin
_logger = get_logger(__name__)
class ColdcardCmdLineHandler(CmdLineHandler):
def get_passphrase(self, msg, confirm):
raise NotImplementedError
def get_pin(self, msg):
raise NotImplementedError
def prompt_auth(self, msg):
raise NotImplementedError
def yes_no_question(self, msg):
print_msg(msg)
return raw_input() in 'yY'
def stop(self):
pass
def show_message(self, msg, on_cancel=None):
print_stderr(msg)
def show_error(self, msg, blocking=False):
print_stderr(msg)
def update_status(self, b):
_logger.info(f'hw device status {b}')
def finished(self):
pass
class Plugin(ColdcardPlugin):
handler = ColdcardCmdLineHandler()
@hook
def init_keystore(self, keystore):
if not isinstance(keystore, self.keystore_class):
return
keystore.handler = self.handler
def create_handler(self, window):
return self.handler
# EOF
|
py | 7df86f35aa59a8cb9853bb56cc77d829e4f64aef | # -*- coding: utf-8 -*-
__version__ = '0.12.0.dev'
__description__ = 'Fully featured framework for fast, easy and documented API development with Flask'
|
py | 7df86f5701d6580fb4ee8520bb9a5af4d49b6a1c | # coding=utf-8
#
# Copyright (c) 2014-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TSStationPosition.py
# firstflamingo/treinenaapje
#
# Created by Berend Schotanus on 13-May-14.
#
"""TSStationPosition represents the position of a station on a route"""
import re
import logging
from google.appengine.ext import ndb
from ffe.rest_resources import Resource
class TSStationPosition(Resource):
auto_creates_indexes = False
publicly_visible = True
station_key = ndb.KeyProperty(kind='TSStation')
route_key = ndb.KeyProperty(kind='TSRoute')
km = ndb.FloatProperty(default=0.0)
geo_point = ndb.GeoPtProperty()
platform_range = ndb.TextProperty()
    identifier_regex = re.compile(r'([a-z]{2})\.([a-z]{1,5})_([a-z]{2,4}[0-9]{1,2})$')
# ------------ Object lifecycle ------------------------------------------------------------------------------------
@classmethod
def new(cls, identifier=None, country='nl', station_code=None, route_code='xx00'):
if station_code and not identifier:
identifier = '%s.%s_%s' % (country, station_code, route_code)
self = super(TSStationPosition, cls).new(identifier)
station_id = '%s.%s' % (self.country, self.station_code)
self.station_key = ndb.Key('TSStation', station_id)
route_id = '%s.%s' % (self.country, self.route_code)
self.route_key = ndb.Key('TSRoute', route_id)
return self
# ------------ Object metadata -------------------------------------------------------------------------------------
def __repr__(self):
return "<%s %s = km %.3f on %s>" % (self.__class__.__name__, self.station_id, self.km, self.route_id)
@property
def country(self):
return self.id_part(1)
@property
def station(self):
return self.station_key.get()
@property
def station_code(self):
return self.id_part(2)
@property
def station_id(self):
return self.station_key.id()
@property
def route(self):
return self.route_key.get()
@property
def route_code(self):
return self.id_part(3)
@property
def route_id(self):
return self.route_key.id()
@property
def coordinate(self):
if self.geo_point is not None:
return self.geo_point.lat, self.geo_point.lon
else:
return None, None
@coordinate.setter
def coordinate(self, coord):
lat, lon = coord
self.geo_point = ndb.GeoPt(lat, lon)
def dictionary_from_object(self, perspective=None):
dictionary = {'km': self.km, 'lat': self.coordinate[0], 'lon': self.coordinate[1]}
if perspective != 'route':
dictionary['route'] = self.route_id
        if perspective != 'station':
dictionary['station'] = self.station_id
if self.platform_range is not None:
dictionary['platforms'] = self.platform_range
return dictionary
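# Illustrative sketch (not part of the original file): a position is identified as
# '<country>.<station>_<route>' and serializes its km marker plus coordinate, e.g.:
#
#   pos = TSStationPosition.new(country='nl', station_code='asd', route_code='rt1')
#   pos.coordinate = (52.379, 4.9)
#   pos.dictionary_from_object(perspective='route')
#   # -> {'km': 0.0, 'lat': 52.379, 'lon': 4.9, 'station': 'nl.asd'}
#
# The station/route codes and coordinates above are made-up examples.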
|
py | 7df86f82bb62d67699e1ffdd9a58d603afeddc4e | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/type/money.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/type/money.proto',
package='google.type',
syntax='proto3',
serialized_options=b'\n\017com.google.typeB\nMoneyProtoP\001Z6google.golang.org/genproto/googleapis/type/money;money\370\001\001\242\002\003GTP',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x17google/type/money.proto\x12\x0bgoogle.type\"<\n\x05Money\x12\x15\n\rcurrency_code\x18\x01 \x01(\t\x12\r\n\x05units\x18\x02 \x01(\x03\x12\r\n\x05nanos\x18\x03 \x01(\x05\x42`\n\x0f\x63om.google.typeB\nMoneyProtoP\x01Z6google.golang.org/genproto/googleapis/type/money;money\xf8\x01\x01\xa2\x02\x03GTPb\x06proto3'
)
_MONEY = _descriptor.Descriptor(
name='Money',
full_name='google.type.Money',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='currency_code', full_name='google.type.Money.currency_code', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='units', full_name='google.type.Money.units', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='nanos', full_name='google.type.Money.nanos', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=40,
serialized_end=100,
)
DESCRIPTOR.message_types_by_name['Money'] = _MONEY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Money = _reflection.GeneratedProtocolMessageType('Money', (_message.Message,), {
'DESCRIPTOR' : _MONEY,
'__module__' : 'google.type.money_pb2'
# @@protoc_insertion_point(class_scope:google.type.Money)
})
_sym_db.RegisterMessage(Money)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | 7df86fd9dca73164af681a2eaec5004278b29c0e | #!/usr/bin/env python
import rospy
import actionlib
from actionlib import GoalStatus
from geometry_msgs.msg import Pose, Point, Quaternion, Twist
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal, MoveBaseActionFeedback
from tf.transformations import quaternion_from_euler
from visualization_msgs.msg import Marker
from math import pi
from collections import OrderedDict
def setup_task_environment(self):
# How big is the square we want the robot to patrol?
self.square_size = rospy.get_param("~square_size", 1.0) # meters
# Set the low battery threshold (between 0 and 100)
self.low_battery_threshold = rospy.get_param('~low_battery_threshold', 50)
# How many times should we execute the patrol loop
    self.n_patrols = rospy.get_param("~n_patrols", 2) # number of patrol loops
# How long do we have to get to each waypoint?
self.move_base_timeout = rospy.get_param("~move_base_timeout", 10) #seconds
# Initialize the patrol counter
self.patrol_count = 0
# Subscribe to the move_base action server
self.move_base = actionlib.SimpleActionClient("move_base", MoveBaseAction)
rospy.loginfo("Waiting for move_base action server...")
# Wait up to 60 seconds for the action server to become available
self.move_base.wait_for_server(rospy.Duration(60))
rospy.loginfo("Connected to move_base action server")
# Create a list to hold the target quaternions (orientations)
quaternions = list()
# First define the corner orientations as Euler angles
euler_angles = (pi/2, pi, 3*pi/2, 0)
# Then convert the angles to quaternions
for angle in euler_angles:
q_angle = quaternion_from_euler(0, 0, angle, axes='sxyz')
q = Quaternion(*q_angle)
quaternions.append(q)
# Create a list to hold the waypoint poses
self.waypoints = list()
# Append each of the four waypoints to the list. Each waypoint
# is a pose consisting of a position and orientation in the map frame.
self.waypoints.append(Pose(Point(0.0, 0.0, 0.0), quaternions[3]))
self.waypoints.append(Pose(Point(self.square_size, 0.0, 0.0), quaternions[0]))
self.waypoints.append(Pose(Point(self.square_size, self.square_size, 0.0), quaternions[1]))
self.waypoints.append(Pose(Point(0.0, self.square_size, 0.0), quaternions[2]))
# Create a mapping of room names to waypoint locations
room_locations = (('hallway', self.waypoints[0]),
('living_room', self.waypoints[1]),
('kitchen', self.waypoints[2]),
('bathroom', self.waypoints[3]))
# Store the mapping as an ordered dictionary so we can visit the rooms in sequence
self.room_locations = OrderedDict(room_locations)
# Where is the docking station?
self.docking_station_pose = (Pose(Point(0.5, 0.5, 0.0), Quaternion(0.0, 0.0, 0.0, 1.0)))
# Initialize the waypoint visualization markers for RViz
init_waypoint_markers(self)
# Set a visualization marker at each waypoint
for waypoint in self.waypoints:
p = Point()
p = waypoint.position
self.waypoint_markers.points.append(p)
# Set a marker for the docking station
init_docking_station_marker(self)
# Publisher to manually control the robot (e.g. to stop it)
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)
rospy.loginfo("Starting Tasks")
# Publish the waypoint markers
self.marker_pub.publish(self.waypoint_markers)
rospy.sleep(1)
self.marker_pub.publish(self.waypoint_markers)
# Publish the docking station marker
self.docking_station_marker_pub.publish(self.docking_station_marker)
rospy.sleep(1)
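# Illustrative note (not part of the original file): the four waypoints trace the corners
# of a square of side `square_size` counter-clockwise -- (0,0), (size,0), (size,size),
# (0,size) -- and each corner's quaternion (yaw 0, pi/2, pi, 3*pi/2) faces the robot
# along the edge toward the next corner. For example:
#
#   quaternion_from_euler(0, 0, pi/2, axes='sxyz')   # ~ (0, 0, 0.707, 0.707), i.e. facing +y
#
# With the default square_size of 1.0, 'living_room' therefore sits at (1.0, 0.0) facing +y.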
def init_waypoint_markers(self):
# Set up our waypoint markers
marker_scale = 0.2
marker_lifetime = 0 # 0 is forever
marker_ns = 'waypoints'
marker_id = 0
marker_color = {'r': 1.0, 'g': 0.7, 'b': 1.0, 'a': 1.0}
# Define a marker publisher.
self.marker_pub = rospy.Publisher('waypoint_markers', Marker, queue_size=5)
# Initialize the marker points list.
self.waypoint_markers = Marker()
self.waypoint_markers.ns = marker_ns
self.waypoint_markers.id = marker_id
self.waypoint_markers.type = Marker.CUBE_LIST
self.waypoint_markers.action = Marker.ADD
self.waypoint_markers.lifetime = rospy.Duration(marker_lifetime)
self.waypoint_markers.scale.x = marker_scale
self.waypoint_markers.scale.y = marker_scale
self.waypoint_markers.color.r = marker_color['r']
self.waypoint_markers.color.g = marker_color['g']
self.waypoint_markers.color.b = marker_color['b']
self.waypoint_markers.color.a = marker_color['a']
self.waypoint_markers.header.frame_id = 'odom'
self.waypoint_markers.header.stamp = rospy.Time.now()
self.waypoint_markers.points = list()
def init_docking_station_marker(self):
# Define a marker for the charging station
marker_scale = 0.3
marker_lifetime = 0 # 0 is forever
marker_ns = 'waypoints'
marker_id = 0
marker_color = {'r': 0.7, 'g': 0.7, 'b': 0.0, 'a': 1.0}
self.docking_station_marker_pub = rospy.Publisher('docking_station_marker', Marker, queue_size=5)
self.docking_station_marker = Marker()
self.docking_station_marker.ns = marker_ns
self.docking_station_marker.id = marker_id
self.docking_station_marker.type = Marker.CYLINDER
self.docking_station_marker.action = Marker.ADD
self.docking_station_marker.lifetime = rospy.Duration(marker_lifetime)
self.docking_station_marker.scale.x = marker_scale
self.docking_station_marker.scale.y = marker_scale
self.docking_station_marker.scale.z = 0.02
self.docking_station_marker.color.r = marker_color['r']
self.docking_station_marker.color.g = marker_color['g']
self.docking_station_marker.color.b = marker_color['b']
self.docking_station_marker.color.a = marker_color['a']
self.docking_station_marker.header.frame_id = 'odom'
self.docking_station_marker.header.stamp = rospy.Time.now()
self.docking_station_marker.pose = self.docking_station_pose
|
py | 7df86fdf095fbb348bbd3e58b06804d91863db11 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# import django
# sys.path.insert(0, os.path.abspath('..'))
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# django.setup()
# -- Project information -----------------------------------------------------
project = "Logical Console"
copyright = """2020, John Teague"""
author = "John Teague"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
|
py | 7df86fe385da7418ae357dc2ccf251631c2d6ec5 | # Generated by Django 2.2 on 2020-09-21 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True)),
('name', models.CharField(max_length=254)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
py | 7df870f7ee42fc40f334859d523571bd24e7f0b1 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for side inputs."""
import logging
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.test_pipeline import TestPipeline
from apache_beam.transforms import window
from apache_beam.transforms.util import assert_that, equal_to
class SideInputsTest(unittest.TestCase):
def create_pipeline(self):
return TestPipeline()
def run_windowed_side_inputs(self, elements, main_window_fn,
side_window_fn=None,
side_input_type=beam.pvalue.AsList,
combine_fn=None,
expected=None):
with self.create_pipeline() as p:
pcoll = p | beam.Create(elements) | beam.Map(
lambda t: window.TimestampedValue(t, t))
main = pcoll | 'WindowMain' >> beam.WindowInto(main_window_fn)
side = pcoll | 'WindowSide' >> beam.WindowInto(
side_window_fn or main_window_fn)
kw = {}
if combine_fn is not None:
side |= beam.CombineGlobally(combine_fn).without_defaults()
kw['default_value'] = 0
elif side_input_type == beam.pvalue.AsDict:
side |= beam.Map(lambda x: ('k%s' % x, 'v%s' % x))
res = main | beam.Map(lambda x, s: (x, s), side_input_type(side, **kw))
if side_input_type in (beam.pvalue.AsIter, beam.pvalue.AsList):
res |= beam.Map(lambda (x, s): (x, sorted(s)))
assert_that(res, equal_to(expected))
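  # Illustrative note (not part of the original file): run_windowed_side_inputs stamps
  # every element with its own value as the timestamp, windows the main and side
  # PCollections (side_window_fn defaults to main_window_fn), and pairs each main
  # element with the side-input view of the side window overlapping it. For example,
  # with FixedWindows(10) on both sides the element 11 only ever sees [11], which is
  # exactly what test_same_fixed_windows below asserts.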
def test_global_global_windows(self):
self.run_windowed_side_inputs(
[1, 2, 3],
window.GlobalWindows(),
expected=[(1, [1, 2, 3]), (2, [1, 2, 3]), (3, [1, 2, 3])])
def test_same_fixed_windows(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
expected=[(1, [1, 2]), (2, [1, 2]), (11, [11])])
def test_different_fixed_windows(self):
self.run_windowed_side_inputs(
[1, 2, 11, 21, 31],
window.FixedWindows(10),
window.FixedWindows(20),
expected=[(1, [1, 2, 11]), (2, [1, 2, 11]), (11, [1, 2, 11]),
(21, [21, 31]), (31, [21, 31])])
def test_fixed_global_window(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
window.GlobalWindows(),
expected=[(1, [1, 2, 11]), (2, [1, 2, 11]), (11, [1, 2, 11])])
def test_sliding_windows(self):
self.run_windowed_side_inputs(
[1, 2, 4],
window.SlidingWindows(size=6, period=2),
window.SlidingWindows(size=6, period=2),
expected=[
# Element 1 falls in three windows
(1, [1]), # [-4, 2)
(1, [1, 2]), # [-2, 4)
(1, [1, 2, 4]), # [0, 6)
# as does 2,
(2, [1, 2]), # [-2, 4)
(2, [1, 2, 4]), # [0, 6)
(2, [2, 4]), # [2, 8)
# and 4.
(4, [1, 2, 4]), # [0, 6)
(4, [2, 4]), # [2, 8)
(4, [4]), # [4, 10)
])
def test_windowed_iter(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsIter,
expected=[(1, [1, 2]), (2, [1, 2]), (11, [11])])
def test_windowed_singleton(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsSingleton,
combine_fn=sum,
expected=[(1, 3), (2, 3), (11, 11)])
def test_windowed_dict(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsDict,
expected=[
(1, {'k1': 'v1', 'k2': 'v2'}),
(2, {'k1': 'v1', 'k2': 'v2'}),
(11, {'k11': 'v11'}),
])
@attr('ValidatesRunner')
def test_empty_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([]) # Empty side input.
def my_fn(k, s):
# TODO(robertwb): Should this be an error as in Java?
v = ('empty' if isinstance(s, beam.pvalue.EmptySideInput) else 'full')
return [(k, v)]
result = pcol | 'compute' >> beam.FlatMap(
my_fn, beam.pvalue.AsSingleton(side))
assert_that(result, equal_to([(1, 'empty'), (2, 'empty')]))
pipeline.run()
# @attr('ValidatesRunner')
  # TODO(BEAM-1124): Temporarily disabled because the test fails when running on
  # the Dataflow service.
def test_multi_valued_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([3, 4]) # 2 values in side input.
pcol | 'compute' >> beam.FlatMap( # pylint: disable=expression-not-assigned
lambda x, s: [x * s], beam.pvalue.AsSingleton(side))
with self.assertRaises(ValueError):
pipeline.run()
@attr('ValidatesRunner')
def test_default_value_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([]) # 0 values in side input.
result = pcol | beam.FlatMap(
lambda x, s: [x * s], beam.pvalue.AsSingleton(side, 10))
assert_that(result, equal_to([10, 20]))
pipeline.run()
@attr('ValidatesRunner')
def test_iterable_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([3, 4]) # 2 values in side input.
result = pcol | 'compute' >> beam.FlatMap(
lambda x, s: [x * y for y in s],
beam.pvalue.AsIter(side))
assert_that(result, equal_to([3, 4, 6, 8]))
pipeline.run()
@attr('ValidatesRunner')
def test_as_list_and_as_dict_side_inputs(self):
a_list = [5, 1, 3, 2, 9]
some_pairs = [('crouton', 17), ('supreme', None)]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
side_pairs = pipeline | 'side pairs' >> beam.Create(some_pairs)
results = main_input | 'concatenate' >> beam.FlatMap(
lambda x, the_list, the_dict: [[x, the_list, the_dict]],
beam.pvalue.AsList(side_list), beam.pvalue.AsDict(side_pairs))
def matcher(expected_elem, expected_list, expected_pairs):
def match(actual):
[[actual_elem, actual_list, actual_dict]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_list)(actual_list)
equal_to(expected_pairs)(actual_dict.iteritems())
return match
assert_that(results, matcher(1, a_list, some_pairs))
pipeline.run()
@attr('ValidatesRunner')
def test_as_singleton_without_unique_labels(self):
# This should succeed as calling beam.pvalue.AsSingleton on the same
# PCollection twice with the same defaults will return the same
# PCollectionView.
a_list = [2]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.FlatMap(
lambda x, s1, s2: [[x, s1, s2]],
beam.pvalue.AsSingleton(side_list), beam.pvalue.AsSingleton(side_list))
def matcher(expected_elem, expected_singleton):
def match(actual):
[[actual_elem, actual_singleton1, actual_singleton2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to([expected_singleton])([actual_singleton1])
equal_to([expected_singleton])([actual_singleton2])
return match
assert_that(results, matcher(1, 2))
pipeline.run()
@attr('ValidatesRunner')
def test_as_singleton_with_different_defaults_without_unique_labels(self):
# This should fail as beam.pvalue.AsSingleton with distinct default values
# should beam.Create distinct PCollectionViews with the same full_label.
a_list = [2]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
with self.assertRaises(RuntimeError) as e:
_ = main_input | beam.FlatMap(
lambda x, s1, s2: [[x, s1, s2]],
beam.pvalue.AsSingleton(side_list),
beam.pvalue.AsSingleton(side_list, default_value=3))
self.assertTrue(
e.exception.message.startswith(
'Transform "ViewAsSingleton(side list.None)" does not have a '
'stable unique label.'))
@attr('ValidatesRunner')
def test_as_singleton_with_different_defaults_with_unique_labels(self):
a_list = []
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.FlatMap(
lambda x, s1, s2: [[x, s1, s2]],
beam.pvalue.AsSingleton(side_list, default_value=2, label='si1'),
beam.pvalue.AsSingleton(side_list, default_value=3, label='si2'))
def matcher(expected_elem, expected_singleton1, expected_singleton2):
def match(actual):
[[actual_elem, actual_singleton1, actual_singleton2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to([expected_singleton1])([actual_singleton1])
equal_to([expected_singleton2])([actual_singleton2])
return match
assert_that(results, matcher(1, 2, 3))
pipeline.run()
@attr('ValidatesRunner')
def test_as_list_without_unique_labels(self):
# This should succeed as calling beam.pvalue.AsList on the same
# PCollection twice will return the same PCollectionView.
a_list = [1, 2, 3]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.FlatMap(
lambda x, ls1, ls2: [[x, ls1, ls2]],
beam.pvalue.AsList(side_list), beam.pvalue.AsList(side_list))
def matcher(expected_elem, expected_list):
def match(actual):
[[actual_elem, actual_list1, actual_list2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_list)(actual_list1)
equal_to(expected_list)(actual_list2)
return match
assert_that(results, matcher(1, [1, 2, 3]))
pipeline.run()
@attr('ValidatesRunner')
def test_as_list_with_unique_labels(self):
a_list = [1, 2, 3]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.FlatMap(
lambda x, ls1, ls2: [[x, ls1, ls2]],
beam.pvalue.AsList(side_list),
beam.pvalue.AsList(side_list, label='label'))
def matcher(expected_elem, expected_list):
def match(actual):
[[actual_elem, actual_list1, actual_list2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_list)(actual_list1)
equal_to(expected_list)(actual_list2)
return match
assert_that(results, matcher(1, [1, 2, 3]))
pipeline.run()
@attr('ValidatesRunner')
def test_as_dict_with_unique_labels(self):
some_kvs = [('a', 1), ('b', 2)]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_kvs = pipeline | 'side kvs' >> beam.Create(some_kvs)
results = main_input | beam.FlatMap(
lambda x, dct1, dct2: [[x, dct1, dct2]],
beam.pvalue.AsDict(side_kvs),
beam.pvalue.AsDict(side_kvs, label='label'))
def matcher(expected_elem, expected_kvs):
def match(actual):
[[actual_elem, actual_dict1, actual_dict2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_kvs)(actual_dict1.iteritems())
equal_to(expected_kvs)(actual_dict2.iteritems())
return match
assert_that(results, matcher(1, some_kvs))
pipeline.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
|
py | 7df8710376727ff1b81bb169634bbcadec01fd3b | from __future__ import absolute_import, division, print_function
__metaclass__ = type
from .....plugins.module_utils.my_util import hello
def test_hello():
assert hello('Ansibull') == 'Hello Ansibull'
|
py | 7df87143afb779ce335b2e6d2783417cb1d20050 | """Test sunsynk library."""
from typing import Sequence
from unittest.mock import AsyncMock
import pytest
# from sunsynk.definitions import serial
from sunsynk import Sunsynk, pySunsynk, uSunsynk
@pytest.fixture
def sss() -> Sequence[Sunsynk]:
res: Sequence[Sunsynk] = []
if uSunsynk:
res.append(uSunsynk())
if pySunsynk:
res.append(pySunsynk())
return res
@pytest.mark.asyncio
async def test_ss():
if pySunsynk:
ss = pySunsynk()
with pytest.raises(ConnectionError):
await ss.connect()
@pytest.mark.asyncio
async def test_ss_tcp():
if pySunsynk:
ss = pySunsynk()
ss.port = "127.0.0.1:502"
with pytest.raises(ConnectionError):
await ss.connect()
@pytest.mark.asyncio
async def test_ss_read(sss):
for ss in sss:
if uSunsynk:
ss = uSunsynk()
ss.client = AsyncMock()
if pySunsynk:
ss = pySunsynk()
ss.client = AsyncMock()
|
py | 7df871aaf5fd211bfdf88d57b630d22d4fa0a0fc | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from inspect import getmembers, isfunction
from unittest import mock
from unittest.mock import ANY, PropertyMock
import pytest
import torch
from torch.utils.data import DataLoader
from pytorch_lightning import __version__, Callback, LightningDataModule, LightningModule, Trainer
from tests.helpers import BoringDataModule, BoringModel, RandomDataset
from tests.helpers.runif import RunIf
@pytest.mark.parametrize('max_steps', [1, 2, 3])
def test_on_before_zero_grad_called(tmpdir, max_steps):
class CurrentTestModel(BoringModel):
on_before_zero_grad_called = 0
def on_before_zero_grad(self, optimizer):
self.on_before_zero_grad_called += 1
model = CurrentTestModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=max_steps,
max_epochs=2,
)
assert 0 == model.on_before_zero_grad_called
trainer.fit(model)
assert max_steps == model.on_before_zero_grad_called
model.on_before_zero_grad_called = 0
trainer.test(model)
assert 0 == model.on_before_zero_grad_called
def test_training_epoch_end_metrics_collection(tmpdir):
""" Test that progress bar metrics also get collected at the end of an epoch. """
num_epochs = 3
class CurrentModel(BoringModel):
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
self.log_dict({'step_metric': torch.tensor(-1), 'shared_metric': 100}, logger=False, prog_bar=True)
return output
def training_epoch_end(self, outputs):
epoch = self.current_epoch
# both scalar tensors and Python numbers are accepted
self.log_dict(
{
f'epoch_metric_{epoch}': torch.tensor(epoch),
'shared_metric': 111
},
logger=False,
prog_bar=True,
)
model = CurrentModel()
trainer = Trainer(
max_epochs=num_epochs,
default_root_dir=tmpdir,
overfit_batches=2,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
metrics = trainer.progress_bar_dict
# metrics added in training step should be unchanged by epoch end method
assert metrics['step_metric'] == -1
# a metric shared in both methods gets overwritten by epoch_end
assert metrics['shared_metric'] == 111
# metrics are kept after each epoch
for i in range(num_epochs):
assert metrics[f'epoch_metric_{i}'] == i
def test_training_epoch_end_metrics_collection_on_override(tmpdir):
""" Test that batch end metrics are collected when training_epoch_end is overridden at the end of an epoch. """
class OverriddenModel(BoringModel):
def __init__(self):
super().__init__()
self.len_outputs = 0
def on_train_epoch_start(self):
self.num_train_batches = 0
def training_epoch_end(self, outputs):
self.len_outputs = len(outputs)
def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
self.num_train_batches += 1
class NotOverriddenModel(BoringModel):
def on_train_epoch_start(self):
self.num_train_batches = 0
def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
self.num_train_batches += 1
overridden_model = OverriddenModel()
not_overridden_model = NotOverriddenModel()
not_overridden_model.training_epoch_end = None
trainer = Trainer(
max_epochs=1,
default_root_dir=tmpdir,
overfit_batches=2,
)
trainer.fit(overridden_model)
assert overridden_model.len_outputs == overridden_model.num_train_batches
@RunIf(min_gpus=1)
@mock.patch("pytorch_lightning.accelerators.accelerator.Accelerator.lightning_module", new_callable=PropertyMock)
def test_apply_batch_transfer_handler(model_getter_mock):
expected_device = torch.device('cuda', 0)
class CustomBatch:
def __init__(self, data):
self.samples = data[0]
self.targets = data[1]
class CurrentTestModel(BoringModel):
rank = 0
transfer_batch_to_device_hook_rank = None
on_before_batch_transfer_hook_rank = None
on_after_batch_transfer_hook_rank = None
def on_before_batch_transfer(self, batch, dataloader_idx):
assert dataloader_idx is None
self.on_before_batch_transfer_hook_rank = self.rank
self.rank += 1
batch.samples += 1
return batch
def on_after_batch_transfer(self, batch, dataloader_idx):
assert dataloader_idx is None
assert batch.samples.device == batch.targets.device == expected_device
self.on_after_batch_transfer_hook_rank = self.rank
self.rank += 1
batch.targets *= 2
return batch
def transfer_batch_to_device(self, batch, device, dataloader_idx):
assert dataloader_idx is None
self.transfer_batch_to_device_hook_rank = self.rank
self.rank += 1
batch.samples = batch.samples.to(device)
batch.targets = batch.targets.to(device)
return batch
model = CurrentTestModel()
batch = CustomBatch((torch.zeros(5, 32), torch.ones(5, 1, dtype=torch.long)))
trainer = Trainer(gpus=1)
# running .fit() would require us to implement custom data loaders, we mock the model reference instead
model_getter_mock.return_value = model
batch_gpu = trainer.accelerator.batch_to_device(batch, expected_device)
assert model.on_before_batch_transfer_hook_rank == 0
assert model.transfer_batch_to_device_hook_rank == 1
assert model.on_after_batch_transfer_hook_rank == 2
assert batch_gpu.samples.device == batch_gpu.targets.device == expected_device
assert torch.allclose(batch_gpu.samples.cpu(), torch.ones(5, 32))
assert torch.allclose(batch_gpu.targets.cpu(), torch.ones(5, 1, dtype=torch.long) * 2)
@RunIf(min_gpus=2, special=True)
def test_transfer_batch_hook_ddp(tmpdir):
"""
Test custom data are properly moved to the right device using ddp
"""
class CustomBatch:
def __init__(self, data):
self.samples = data[0]
def to(self, device, **kwargs):
self.samples = self.samples.to(device, **kwargs)
return self
def collate_fn(batch):
return CustomBatch(batch)
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
assert batch.samples.device == self.device
assert isinstance(batch_idx, int)
def train_dataloader(self):
return torch.utils.data.DataLoader(RandomDataset(32, 64), collate_fn=collate_fn)
model = TestModel()
model.validation_step = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=0,
max_epochs=1,
weights_summary=None,
accelerator="ddp",
gpus=2,
)
trainer.fit(model)
def get_members(cls):
return {h for h, _ in getmembers(cls, predicate=isfunction) if not h.startswith('_')}
class HookedCallback(Callback):
def __init__(self, called):
def call(hook, *args, **kwargs):
d = {'name': f'Callback.{hook}'}
if args:
d['args'] = args
if kwargs:
d['kwargs'] = kwargs
called.append(d)
for h in get_members(Callback):
setattr(self, h, partial(call, h))
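# Illustrative note (not part of the original file): HookedCallback replaces every public
# Callback hook with a partial that appends {'name': 'Callback.<hook>', 'args': ..., 'kwargs': ...}
# to the shared `called` list, so tests can later compare the exact call sequence, e.g.:
#
#   called = []
#   cb = HookedCallback(called)
#   cb.on_init_start('trainer')
#   assert called == [{'name': 'Callback.on_init_start', 'args': ('trainer',)}]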
class HookedModel(BoringModel):
def __init__(self, called):
super().__init__()
pl_module_hooks = get_members(LightningModule)
# remove non-hooks
pl_module_hooks.difference_update({'optimizers'})
# remove most `nn.Module` hooks
module_hooks = get_members(torch.nn.Module)
module_hooks.difference_update({'forward', 'zero_grad', 'train'})
pl_module_hooks.difference_update(module_hooks)
def call(hook, fn, *args, **kwargs):
out = fn(*args, **kwargs)
d = {'name': hook}
if args:
d['args'] = args
elif hook == 'train':
# DeepSpeed calls `train(mode)` but we do not. Standardize
# https://github.com/microsoft/DeepSpeed/pull/571
d['args'] = (True, )
if kwargs:
d['kwargs'] = kwargs
called.append(d)
return out
for h in pl_module_hooks:
attr = getattr(self, h)
setattr(self, h, partial(call, h, attr))
def validation_epoch_end(self, *args, **kwargs):
# `BoringModel` does not have a return for `validation_step_end` so this would fail
pass
def test_epoch_end(self, *args, **kwargs):
# `BoringModel` does not have a return for `test_step_end` so this would fail
pass
def _train_batch(self, *args, **kwargs):
if self.automatic_optimization:
return self._auto_train_batch(*args, **kwargs)
return self._manual_train_batch(*args, **kwargs)
@staticmethod
def _auto_train_batch(trainer, model, batches, device=torch.device('cpu'), current_epoch=0, **kwargs):
using_native_amp = kwargs.get('amp_backend') == 'native'
using_deepspeed = kwargs.get('plugins') == 'deepspeed'
out = []
on_before_optimizer_step = [
dict(name='Callback.on_before_optimizer_step', args=(trainer, model, ANY, 0)),
dict(name='on_before_optimizer_step', args=(ANY, 0)),
]
for i in range(batches):
out.extend([
dict(name='on_before_batch_transfer', args=(ANY, 0)),
dict(name='transfer_batch_to_device', args=(ANY, device, 0)),
dict(name='on_after_batch_transfer', args=(ANY, 0)),
# TODO: `on_batch_{start,end}`
dict(name='Callback.on_batch_start', args=(trainer, model)),
dict(name='Callback.on_train_batch_start', args=(trainer, model, ANY, i, 0)),
dict(name='on_train_batch_start', args=(ANY, i, 0)),
# these are before the training step because
# they are not part of the `training_step_and_backward` closure, however,
# with native amp, the closure is run first and then the optimizer step.
*(on_before_optimizer_step if not using_native_amp else []),
dict(name='forward', args=(ANY, )),
dict(name='training_step', args=(ANY, i)),
dict(name='training_step_end', args=(dict(loss=ANY), )),
dict(name='Callback.on_before_zero_grad', args=(trainer, model, ANY)),
dict(name='on_before_zero_grad', args=(ANY, )),
dict(name='optimizer_zero_grad', args=(current_epoch, i, ANY, 0)),
dict(name='Callback.on_before_backward', args=(trainer, model, ANY)),
dict(name='on_before_backward', args=(ANY, )),
# DeepSpeed handles backward internally
*([dict(name='backward', args=(ANY, ANY, 0))] if not using_deepspeed else []),
dict(name='Callback.on_after_backward', args=(trainer, model)),
dict(name='on_after_backward'),
*(on_before_optimizer_step if using_native_amp else []),
dict(
name='optimizer_step',
args=(current_epoch, i, ANY, 0, ANY),
kwargs=dict(on_tpu=False, using_lbfgs=False, using_native_amp=using_native_amp)
),
dict(name='Callback.on_train_batch_end', args=(trainer, model, dict(loss=ANY), ANY, i, 0)),
dict(name='on_train_batch_end', args=(dict(loss=ANY), ANY, i, 0)),
dict(name='Callback.on_batch_end', args=(trainer, model)),
])
return out
@staticmethod
def _manual_train_batch(trainer, model, batches, device=torch.device('cpu'), **kwargs):
using_deepspeed = kwargs.get('plugins') == 'deepspeed'
out = []
for i in range(batches):
out.extend([
dict(name='on_before_batch_transfer', args=(ANY, 0)),
dict(name='transfer_batch_to_device', args=(ANY, device, 0)),
dict(name='on_after_batch_transfer', args=(ANY, 0)),
# TODO: `on_batch_{start,end}`
dict(name='Callback.on_batch_start', args=(trainer, model)),
dict(name='Callback.on_train_batch_start', args=(trainer, model, ANY, i, 0)),
dict(name='on_train_batch_start', args=(ANY, i, 0)),
dict(name='forward', args=(ANY, )),
dict(name='Callback.on_before_backward', args=(trainer, model, ANY)),
dict(name='on_before_backward', args=(ANY, )),
# DeepSpeed handles backward internally
*([dict(name='backward', args=(ANY, None, None))] if not using_deepspeed else []),
dict(name='Callback.on_after_backward', args=(trainer, model)),
dict(name='on_after_backward'),
# `manual_backward` calls the previous 3
dict(name='manual_backward', args=(ANY, )),
dict(name='Callback.on_before_optimizer_step', args=(trainer, model, ANY, 0)),
dict(name='on_before_optimizer_step', args=(ANY, 0)),
dict(name='training_step', args=(ANY, i)),
dict(name='training_step_end', args=(dict(loss=ANY), )),
dict(name='Callback.on_train_batch_end', args=(trainer, model, dict(loss=ANY), ANY, i, 0)),
dict(name='on_train_batch_end', args=(dict(loss=ANY), ANY, i, 0)),
dict(name='Callback.on_batch_end', args=(trainer, model)),
])
return out
@staticmethod
def _eval_epoch(fn, trainer, model, batches, key, device=torch.device('cpu')):
outputs = {key: ANY}
return [
dict(name='Callback.on_epoch_start', args=(trainer, model)),
dict(name='on_epoch_start'),
dict(name=f'Callback.on_{fn}_epoch_start', args=(trainer, model)),
dict(name=f'on_{fn}_epoch_start'),
*HookedModel._eval_batch(fn, trainer, model, batches, key, device=device),
dict(name=f'{fn}_epoch_end', args=([outputs] * batches, )),
dict(name=f'Callback.on_{fn}_epoch_end', args=(trainer, model)),
dict(name=f'on_{fn}_epoch_end'),
dict(name='Callback.on_epoch_end', args=(trainer, model)),
dict(name='on_epoch_end'),
]
@staticmethod
def _eval_batch(fn, trainer, model, batches, key, device=torch.device('cpu')):
out = []
outputs = {key: ANY}
for i in range(batches):
out.extend([
dict(name='on_before_batch_transfer', args=(ANY, 0)),
dict(name='transfer_batch_to_device', args=(ANY, device, 0)),
dict(name='on_after_batch_transfer', args=(ANY, 0)),
# TODO: `{,Callback}.on_batch_{start,end}`
dict(name=f'Callback.on_{fn}_batch_start', args=(trainer, model, ANY, i, 0)),
dict(name=f'on_{fn}_batch_start', args=(ANY, i, 0)),
dict(name='forward', args=(ANY, )),
dict(name=f'{fn}_step', args=(ANY, i)),
dict(name=f'{fn}_step_end', args=(outputs, )),
dict(name=f'Callback.on_{fn}_batch_end', args=(trainer, model, outputs, ANY, i, 0)),
dict(name=f'on_{fn}_batch_end', args=(outputs, ANY, i, 0)),
])
return out
@staticmethod
def _predict_batch(trainer, model, batches):
out = []
for i in range(batches):
out.extend([
dict(name='on_before_batch_transfer', args=(ANY, 0)),
dict(name='transfer_batch_to_device', args=(ANY, torch.device('cpu'), 0)),
dict(name='on_after_batch_transfer', args=(ANY, 0)),
# TODO: `{,Callback}.on_batch_{start,end}`
dict(name='Callback.on_predict_batch_start', args=(trainer, model, ANY, i, 0)),
dict(name='on_predict_batch_start', args=(ANY, i, 0)),
dict(name='forward', args=(ANY, )),
dict(name='predict_step', args=(ANY, i)),
# TODO: `predict_step_end`
dict(name='Callback.on_predict_batch_end', args=(trainer, model, ANY, ANY, i, 0)),
dict(name='on_predict_batch_end', args=(ANY, ANY, i, 0)),
])
return out
@pytest.mark.parametrize(
'kwargs',
[
{},
# these precision plugins modify the optimization flow, so testing them explicitly
pytest.param(dict(gpus=1, precision=16, plugins='deepspeed'), marks=RunIf(deepspeed=True, min_gpus=1)),
pytest.param(dict(gpus=1, precision=16, amp_backend='native'), marks=RunIf(amp_native=True, min_gpus=1)),
pytest.param(dict(gpus=1, precision=16, amp_backend='apex'), marks=RunIf(amp_apex=True, min_gpus=1)),
]
)
@pytest.mark.parametrize('automatic_optimization', (True, False))
def test_trainer_model_hook_system_fit(tmpdir, kwargs, automatic_optimization):
called = []
class TestModel(HookedModel):
def __init__(self, *args):
super().__init__(*args)
self.automatic_optimization = automatic_optimization
def training_step(self, batch, batch_idx):
if self.automatic_optimization:
return super().training_step(batch, batch_idx)
loss = self.step(batch[0])
opt = self.optimizers()
opt.zero_grad()
self.manual_backward(loss)
opt.step()
return {'loss': loss}
model = TestModel(called)
callback = HookedCallback(called)
train_batches = 2
val_batches = 2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=train_batches,
limit_val_batches=val_batches,
progress_bar_refresh_rate=0,
weights_summary=None,
callbacks=[callback],
**kwargs,
)
assert called == [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
]
trainer.fit(model)
saved_ckpt = {
'callbacks': ANY,
'epoch': 1,
'global_step': train_batches,
'lr_schedulers': ANY,
'optimizer_states': ANY,
'pytorch-lightning_version': __version__,
'state_dict': ANY,
}
if kwargs.get('amp_backend') == 'native':
saved_ckpt['native_amp_scaling_state'] = ANY
elif kwargs.get('amp_backend') == 'apex':
saved_ckpt['amp_scaling_state'] = ANY
device = torch.device('cuda:0' if 'gpus' in kwargs else 'cpu')
expected = [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
dict(name='prepare_data'),
dict(name='configure_callbacks'),
dict(name='Callback.on_before_accelerator_backend_setup', args=(trainer, model)),
# DeepSpeed needs the batch size to figure out throughput logging
*([dict(name='train_dataloader')] if kwargs.get('plugins') == 'deepspeed' else []),
dict(name='Callback.setup', args=(trainer, model), kwargs=dict(stage='fit')),
dict(name='setup', kwargs=dict(stage='fit')),
dict(name='configure_sharded_model'),
dict(name='Callback.on_configure_sharded_model', args=(trainer, model)),
# DeepSpeed skips initializing optimizers here as they are handled via config
*([dict(name='configure_optimizers')] if kwargs.get('plugins') != 'deepspeed' else []),
dict(name='Callback.on_fit_start', args=(trainer, model)),
dict(name='on_fit_start'),
# TODO: explore whether DeepSpeed can have the same flow for optimizers
# DeepSpeed did not find any optimizer in the config so they are loaded here
*([dict(name='configure_optimizers')] if kwargs.get('plugins') == 'deepspeed' else []),
dict(name='Callback.on_pretrain_routine_start', args=(trainer, model)),
dict(name='on_pretrain_routine_start'),
dict(name='Callback.on_pretrain_routine_end', args=(trainer, model)),
dict(name='on_pretrain_routine_end'),
dict(name='Callback.on_sanity_check_start', args=(trainer, model)),
dict(name='on_val_dataloader'),
dict(name='val_dataloader'),
dict(name='train', args=(False, )),
dict(name='on_validation_model_eval'),
dict(name='zero_grad'),
dict(name='Callback.on_validation_start', args=(trainer, model)),
dict(name='on_validation_start'),
*model._eval_epoch('validation', trainer, model, val_batches, 'x', device=device),
dict(name='Callback.on_validation_end', args=(trainer, model)),
dict(name='on_validation_end'),
dict(name='train', args=(True, )),
dict(name='on_validation_model_train'),
dict(name='Callback.on_sanity_check_end', args=(trainer, model)),
# duplicate `train` because `_run_train` calls it again in case validation wasn't run
dict(name='train', args=(True, )),
dict(name='on_train_dataloader'),
dict(name='train_dataloader'),
dict(name='Callback.on_train_start', args=(trainer, model)),
dict(name='on_train_start'),
dict(name='Callback.on_epoch_start', args=(trainer, model)),
dict(name='on_epoch_start'),
dict(name='Callback.on_train_epoch_start', args=(trainer, model)),
dict(name='on_train_epoch_start'),
*model._train_batch(trainer, model, train_batches, device=device, **kwargs),
dict(name='train', args=(False, )),
dict(name='on_validation_model_eval'),
dict(name='zero_grad'),
dict(name='Callback.on_validation_start', args=(trainer, model)),
dict(name='on_validation_start'),
*model._eval_epoch('validation', trainer, model, val_batches, 'x', device=device),
dict(name='Callback.on_validation_end', args=(trainer, model)),
dict(name='on_validation_end'),
dict(name='train', args=(True, )),
dict(name='on_validation_model_train'),
dict(name='training_epoch_end', args=([dict(loss=ANY)] * train_batches, )),
dict(name='Callback.on_train_epoch_end', args=(trainer, model, [dict(loss=ANY)] * train_batches)),
# `ModelCheckpoint.save_checkpoint` is called here from `Callback.on_train_epoch_end`
dict(name='Callback.on_save_checkpoint', args=(trainer, model, saved_ckpt)),
dict(name='on_save_checkpoint', args=(saved_ckpt, )),
dict(name='on_train_epoch_end', args=([dict(loss=ANY)] * train_batches, )),
dict(name='Callback.on_epoch_end', args=(trainer, model)),
dict(name='on_epoch_end'),
dict(name='Callback.on_train_end', args=(trainer, model)),
dict(name='on_train_end'),
dict(name='Callback.on_fit_end', args=(trainer, model)),
dict(name='on_fit_end'),
dict(name='Callback.teardown', args=(trainer, model), kwargs=dict(stage='fit')),
dict(name='teardown', kwargs=dict(stage='fit')),
]
assert called == expected
def test_trainer_model_hook_system_fit_no_val_and_resume(tmpdir):
# initial training to get a checkpoint
model = BoringModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_steps=1,
limit_val_batches=0,
progress_bar_refresh_rate=0,
weights_summary=None,
)
trainer.fit(model)
best_model_path = trainer.checkpoint_callback.best_model_path
# resume from checkpoint with HookedModel
called = []
model = HookedModel(called)
callback = HookedCallback(called)
train_batches = 2
trainer = Trainer(
default_root_dir=tmpdir,
# already performed 1 step, now resuming to do an additional 2
max_steps=(1 + train_batches),
limit_val_batches=0,
progress_bar_refresh_rate=0,
weights_summary=None,
resume_from_checkpoint=best_model_path,
callbacks=[callback]
)
assert called == [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
]
trainer.fit(model)
saved_ckpt = {
'callbacks': ANY,
'epoch': 2, # TODO: wrong saved epoch
'global_step': (1 + train_batches),
'lr_schedulers': ANY,
'optimizer_states': ANY,
'pytorch-lightning_version': __version__,
'state_dict': ANY,
}
expected = [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
dict(name='prepare_data'),
dict(name='configure_callbacks'),
dict(name='Callback.on_before_accelerator_backend_setup', args=(trainer, model)),
dict(name='Callback.setup', args=(trainer, model), kwargs=dict(stage='fit')),
dict(name='setup', kwargs=dict(stage='fit')),
dict(
name='on_load_checkpoint',
args=({
'callbacks': ANY,
'epoch': 1,
'global_step': 1,
'lr_schedulers': ANY,
'optimizer_states': ANY,
'pytorch-lightning_version': __version__,
'state_dict': ANY,
}, )
),
dict(name='configure_sharded_model'),
dict(name='Callback.on_configure_sharded_model', args=(trainer, model)),
dict(name='configure_optimizers'),
dict(name='Callback.on_fit_start', args=(trainer, model)),
dict(name='on_fit_start'),
dict(name='Callback.on_pretrain_routine_start', args=(trainer, model)),
dict(name='on_pretrain_routine_start'),
dict(name='Callback.on_pretrain_routine_end', args=(trainer, model)),
dict(name='on_pretrain_routine_end'),
dict(name='train', args=(True, )),
dict(name='on_train_dataloader'),
dict(name='train_dataloader'),
# even though no validation runs, we initialize the val dataloader for properties like `num_val_batches`
dict(name='on_val_dataloader'),
dict(name='val_dataloader'),
dict(name='Callback.on_train_start', args=(trainer, model)),
dict(name='on_train_start'),
dict(name='Callback.on_epoch_start', args=(trainer, model)),
dict(name='on_epoch_start'),
dict(name='Callback.on_train_epoch_start', args=(trainer, model)),
dict(name='on_train_epoch_start'),
# TODO: wrong current epoch after reload
*model._train_batch(trainer, model, train_batches, current_epoch=1),
dict(name='training_epoch_end', args=([dict(loss=ANY)] * train_batches, )),
dict(name='Callback.on_train_epoch_end', args=(
trainer,
model,
[dict(loss=ANY)] * train_batches,
)),
dict(name='Callback.on_save_checkpoint', args=(trainer, model, saved_ckpt)),
dict(name='on_save_checkpoint', args=(saved_ckpt, )),
dict(name='on_train_epoch_end', args=([dict(loss=ANY)] * train_batches, )),
dict(name='Callback.on_epoch_end', args=(trainer, model)),
dict(name='on_epoch_end'),
dict(name='Callback.on_train_end', args=(trainer, model)),
dict(name='on_train_end'),
dict(name='Callback.on_fit_end', args=(trainer, model)),
dict(name='on_fit_end'),
dict(name='Callback.teardown', args=(trainer, model), kwargs=dict(stage='fit')),
dict(name='teardown', kwargs=dict(stage='fit')),
]
assert called == expected
@pytest.mark.parametrize('batches', (0, 2))
@pytest.mark.parametrize(['verb', 'noun', 'dataloader', 'key'], [
('validate', 'validation', 'val', 'x'),
('test', 'test', 'test', 'y'),
])
def test_trainer_model_hook_system_eval(tmpdir, batches, verb, noun, dataloader, key):
called = []
model = HookedModel(called)
callback = HookedCallback(called)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_val_batches=batches,
limit_test_batches=batches,
progress_bar_refresh_rate=0,
weights_summary=None,
callbacks=[callback],
)
assert called == [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
]
fn = getattr(trainer, verb)
fn(model, verbose=False)
hooks = [
dict(name='train', args=(False, )),
dict(name=f'on_{noun}_model_eval'),
dict(name='zero_grad'),
dict(name=f'Callback.on_{noun}_start', args=(trainer, model)),
dict(name=f'on_{noun}_start'),
*model._eval_epoch(noun, trainer, model, batches, key),
dict(name=f'Callback.on_{noun}_end', args=(trainer, model)),
dict(name=f'on_{noun}_end'),
dict(name='train', args=(True, )),
dict(name=f'on_{noun}_model_train'),
]
expected = [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
dict(name='prepare_data'),
dict(name='configure_callbacks'),
dict(name='Callback.on_before_accelerator_backend_setup', args=(trainer, model)),
dict(name='Callback.setup', args=(trainer, model), kwargs=dict(stage=verb)),
dict(name='setup', kwargs=dict(stage=verb)),
dict(name='configure_sharded_model'),
dict(name='Callback.on_configure_sharded_model', args=(trainer, model)),
dict(name=f'on_{dataloader}_dataloader'),
dict(name=f'{dataloader}_dataloader'),
*(hooks if batches else []),
dict(name='Callback.teardown', args=(trainer, model), kwargs=dict(stage=verb)),
dict(name='teardown', kwargs=dict(stage=verb)),
]
assert called == expected
def test_trainer_model_hook_system_predict(tmpdir):
called = []
model = HookedModel(called)
callback = HookedCallback(called)
batches = 2
trainer = Trainer(
default_root_dir=tmpdir,
limit_predict_batches=batches,
progress_bar_refresh_rate=0,
callbacks=[callback],
)
assert called == [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
]
trainer.predict(model)
expected = [
dict(name='Callback.on_init_start', args=(trainer, )),
dict(name='Callback.on_init_end', args=(trainer, )),
dict(name='prepare_data'),
dict(name='configure_callbacks'),
dict(name='Callback.on_before_accelerator_backend_setup', args=(trainer, model)),
dict(name='Callback.setup', args=(trainer, model), kwargs=dict(stage='predict')),
dict(name='setup', kwargs=dict(stage='predict')),
dict(name='configure_sharded_model'),
dict(name='Callback.on_configure_sharded_model', args=(trainer, model)),
dict(name='on_predict_dataloader'),
dict(name='predict_dataloader'),
dict(name='train', args=(False, )),
dict(name='on_predict_model_eval'),
dict(name='zero_grad'),
dict(name='Callback.on_predict_start', args=(trainer, model)),
dict(name='on_predict_start'),
# TODO: `{,Callback}.on_epoch_{start,end}`
dict(name='Callback.on_predict_epoch_start', args=(trainer, model)),
dict(name='on_predict_epoch_start'),
*model._predict_batch(trainer, model, batches),
# TODO: `predict_epoch_end`
dict(name='Callback.on_predict_epoch_end', args=(trainer, model, [[ANY] * batches])),
dict(name='on_predict_epoch_end', args=([[ANY] * batches], )),
dict(name='Callback.on_predict_end', args=(trainer, model)),
dict(name='on_predict_end'),
# TODO: `on_predict_model_train`
dict(name='Callback.teardown', args=(trainer, model), kwargs=dict(stage='predict')),
dict(name='teardown', kwargs=dict(stage='predict')),
]
assert called == expected
# TODO: add test for tune
def test_hooks_with_different_argument_names(tmpdir):
"""
Test that argument names can be anything in the hooks
"""
class CustomBoringModel(BoringModel):
def assert_args(self, x, batch_nb):
assert isinstance(x, torch.Tensor)
assert x.size() == (1, 32)
assert isinstance(batch_nb, int)
def training_step(self, x1, batch_nb1):
self.assert_args(x1, batch_nb1)
return super().training_step(x1, batch_nb1)
def validation_step(self, x2, batch_nb2):
self.assert_args(x2, batch_nb2)
return super().validation_step(x2, batch_nb2)
def test_step(self, x3, batch_nb3, dl_idx3):
self.assert_args(x3, batch_nb3)
assert isinstance(dl_idx3, int)
return super().test_step(x3, batch_nb3)
def predict(self, x4, batch_nb4, dl_idx4):
self.assert_args(x4, batch_nb4)
assert isinstance(dl_idx4, int)
return super().predict(x4, batch_nb4, dl_idx4)
def test_dataloader(self):
return [DataLoader(RandomDataset(32, 64)), DataLoader(RandomDataset(32, 64))]
def predict_dataloader(self):
return [DataLoader(RandomDataset(32, 64)), DataLoader(RandomDataset(32, 64))]
model = CustomBoringModel()
model.test_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
fast_dev_run=5,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
trainer.test(ckpt_path=None)
preds = trainer.predict(model)
assert len(preds) == 2
assert all(len(x) == 5 for x in preds)
def test_trainer_datamodule_hook_system(tmpdir):
"""Test the LightningDataModule hook system."""
class HookedDataModule(BoringDataModule):
def __init__(self, called):
super().__init__()
def call(hook, fn, *args, **kwargs):
out = fn(*args, **kwargs)
d = {'name': hook}
if args:
d['args'] = args
if kwargs:
d['kwargs'] = kwargs
called.append(d)
return out
for h in get_members(LightningDataModule):
attr = getattr(self, h)
setattr(self, h, partial(call, h, attr))
model = BoringModel()
batches = 2
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=1,
limit_train_batches=batches,
limit_val_batches=batches,
limit_test_batches=batches,
limit_predict_batches=batches,
progress_bar_refresh_rate=0,
weights_summary=None,
reload_dataloaders_every_epoch=True,
)
called = []
dm = HookedDataModule(called)
trainer.fit(model, datamodule=dm)
batch_transfer = [
dict(name='on_before_batch_transfer', args=(ANY, 0)),
dict(name='transfer_batch_to_device', args=(ANY, torch.device('cpu'), 0)),
dict(name='on_after_batch_transfer', args=(ANY, 0)),
]
expected = [
dict(name='prepare_data'),
dict(name='setup', kwargs=dict(stage='fit')),
dict(name='val_dataloader'),
*batch_transfer * batches,
dict(name='train_dataloader'),
*batch_transfer * batches,
dict(name='val_dataloader'),
*batch_transfer * batches,
dict(
name='on_save_checkpoint',
args=({
'callbacks': ANY,
'epoch': 1,
'global_step': 2,
'lr_schedulers': ANY,
'optimizer_states': ANY,
'pytorch-lightning_version': __version__,
'state_dict': ANY,
}, )
),
dict(name='teardown', kwargs=dict(stage='fit')),
]
assert called == expected
called = []
dm = HookedDataModule(called)
trainer.validate(model, datamodule=dm, verbose=False)
expected = [
dict(name='prepare_data'),
dict(name='setup', kwargs=dict(stage='validate')),
dict(name='val_dataloader'),
*batch_transfer * batches,
dict(name='teardown', kwargs=dict(stage='validate')),
]
assert called == expected
called = []
dm = HookedDataModule(called)
trainer.test(model, datamodule=dm, verbose=False)
expected = [
dict(name='prepare_data'),
dict(name='setup', kwargs=dict(stage='test')),
dict(name='test_dataloader'),
*batch_transfer * batches,
dict(name='teardown', kwargs=dict(stage='test')),
]
assert called == expected
called = []
dm = HookedDataModule(called)
trainer.predict(model, datamodule=dm)
expected = [
dict(name='prepare_data'),
dict(name='setup', kwargs=dict(stage='predict')),
dict(name='predict_dataloader'),
*batch_transfer * batches,
dict(name='teardown', kwargs=dict(stage='predict')),
]
assert called == expected
|
py | 7df871ce16a1eabbffc9a06511bff9d83a10fdf2 | #!/usr/bin/env python3
"""Module containing the PMX mutate class and the command line interface."""
import os
from pathlib import Path
import re
import shutil
import argparse
from typing import Mapping
from biobb_pmx.pmx.common import create_mutations_file, MUTATION_DICT
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.configuration import settings
from biobb_common.tools import file_utils as fu
from biobb_common.tools.file_utils import launchlogger
from biobb_common.command_wrapper import cmd_wrapper
class Pmxmutate(BiobbObject):
"""
| biobb_pmx Pmxmutate
| Wrapper class for the `PMX mutate <https://github.com/deGrootLab/pmx>`_ module.
Args:
input_structure_path (str): Path to the input structure file. File type: input. `Sample file <https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/data/pmx/frame99.pdb>`_. Accepted formats: pdb (edam:format_1476), gro (edam:format_2033).
output_structure_path (str): Path to the output structure file. File type: output. `Sample file <https://github.com/bioexcel/biobb_pmx/raw/master/biobb_pmx/test/reference/pmx/ref_output_structure.pdb>`_. Accepted formats: pdb (edam:format_1476), gro (edam:format_2033).
input_b_structure_path (str) (Optional): Path to the mutated input structure file. File type: input. Accepted formats: pdb (edam:format_1476), gro (edam:format_2033).
properties (dic):
* **mutation_list** (*str*) - ("2Ala") Mutation list in the format "Chain:Resnum MUT_AA_Code" or "Chain:Resnum MUT_NA_Code" (no spaces between the elements) separated by commas. If no chain is provided as chain code all the chains in the pdb file will be mutated. ie: "A:15CYS". Possible MUT_AA_Code: 'ALA', 'ARG', 'ASN', 'ASP', 'ASPH', 'ASPP', 'ASH', 'CYS', 'CYS2', 'CYN', 'CYX', 'CYM', 'CYSH', 'GLU', 'GLUH', 'GLUP', 'GLH', 'GLN', 'GLY', 'HIS', 'HIE', 'HISE', 'HSE', 'HIP', 'HSP', 'HISH', 'HID', 'HSD', 'ILE', 'LEU', 'LYS', 'LYSH', 'LYP', 'LYN', 'LSN', 'MET', 'PHE', 'PRO', 'SER', 'SP1', 'SP2', 'THR', 'TRP', 'TYR', 'VAL'. Possible MUT_NA_Codes: 'A', 'T', 'C', 'G', 'U'.
* **force_field** (*str*) - ("amber99sb-star-ildn-mut") Forcefield to use.
* **resinfo** (*bool*) - (False) Show the list of 3-letter -> 1-letter residues.
* **gmx_lib** (*str*) - ("$CONDA_PREFIX/lib/python3.7/site-packages/pmx/data/mutff45/") Path to the GMXLIB folder in your computer.
* **pmx_path** (*str*) - ("pmx") Path to the PMX command line interface.
* **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
* **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
* **container_path** (*str*) - (None) Path to the binary executable of your container.
* **container_image** (*str*) - (None) Container Image identifier.
* **container_volume_path** (*str*) - ("/inout") Path to an internal directory in the container.
* **container_working_dir** (*str*) - (None) Path to the internal CWD in the container.
* **container_user_id** (*str*) - (None) User number id to be mapped inside the container.
* **container_shell_path** (*str*) - ("/bin/bash") Path to the binary executable of the container shell.
Examples:
This is a use example of how to use the building block from Python::
from biobb_pmx.pmx.pmxmutate import pmxmutate
prop = {
'mutation_list': '2Ala, 3Val',
'gmx_lib': '/path/to/myGMXLIB/',
'force_field': 'amber99sb-star-ildn-mut'
}
pmxmutate(input_structure_path='/path/to/myStructure.pdb',
output_structure_path='/path/to/newStructure.pdb',
                    input_b_structure_path='/path/to/myStructureB.pdb',
properties=prop)
Info:
* wrapped_software:
* name: PMX mutate
* version: >=1.0.1
* license: GNU
* ontology:
* name: EDAM
* schema: http://edamontology.org/EDAM.owl
"""
def __init__(self, input_structure_path: str, output_structure_path: str, input_b_structure_path: str = None,
properties: Mapping = None, **kwargs) -> None:
properties = properties or {}
# Call parent class constructor
super().__init__(properties)
# Input/Output files
self.io_dict = {
"in": {"input_structure_path": input_structure_path, "input_b_structure_path": input_b_structure_path},
"out": {"output_structure_path": output_structure_path}
}
# Properties specific for BB
self.force_field = properties.get('force_field', "amber99sb-star-ildn-mut")
self.resinfo = properties.get('resinfo', False)
self.mutation_list = properties.get('mutation_list', '2Ala')
self.input_mutations_file = properties.get('mutations_file')
# Properties common in all PMX BB
self.gmx_lib = properties.get('gmx_lib', None)
if not self.gmx_lib and os.environ.get('CONDA_PREFIX'):
self.gmx_lib = str(
Path(os.environ.get('CONDA_PREFIX')).joinpath("lib/python3.7/site-packages/pmx/data/mutff45/"))
if properties.get('container_path'):
self.gmx_lib = str(Path('/usr/local/').joinpath("lib/python3.7/site-packages/pmx/data/mutff45/"))
self.pmx_path = properties.get('pmx_path', 'pmx')
# Check the properties
self.check_properties(properties)
@launchlogger
def launch(self) -> int:
"""Execute the :class:`Pmxmutate <pmx.pmxmutate.Pmxmutate>` pmx.pmxmutate.Pmxmutate object."""
# Setup Biobb
if self.check_restart(): return 0
self.stage_files()
# Check if executable exists
if not self.container_path:
if not Path(self.pmx_path).is_file():
if not shutil.which(self.pmx_path):
raise FileNotFoundError(
'Executable %s not found. Check if it is installed in your system and correctly defined in the properties' % self.pmx_path)
# Generate mutations file
mutations_dir = fu.create_unique_dir()
self.input_mutations_file = create_mutations_file(input_mutations_path=str(Path(mutations_dir).joinpath('mutations.txt')),
mutation_list=self.mutation_list,
mutation_dict=MUTATION_DICT)
# Copy extra files to container: mutations file
if self.container_path:
fu.log('Container execution enabled', self.out_log)
shutil.copy2(self.input_mutations_file, self.stage_io_dict.get("unique_dir"))
self.input_mutations_file = str(Path(self.container_volume_path).joinpath(Path(self.input_mutations_file).name))
self.cmd = [self.pmx_path, 'mutate',
'-f', self.stage_io_dict["in"]["input_structure_path"],
'-o', self.stage_io_dict["out"]["output_structure_path"],
'-ff', self.force_field,
'--script', self.input_mutations_file]
if self.stage_io_dict["in"].get("input_b_structure_path"):
self.cmd.append('-fB')
self.cmd.append(self.stage_io_dict["in"]["input_b_structure_path"])
if self.resinfo:
self.cmd.append('-resinfo')
if self.gmx_lib:
self.environment = os.environ.copy()
self.environment['GMXLIB'] = self.gmx_lib
# Run Biobb block
self.run_biobb()
# Copy files to host
self.copy_to_host()
self.tmp_files.append(self.stage_io_dict.get("unique_dir"))
self.remove_tmp_files()
return self.return_code
def pmxmutate(input_structure_path: str, output_structure_path: str,
input_b_structure_path: str = None, properties: dict = None,
**kwargs) -> int:
"""Execute the :class:`Pmxmutate <pmx.pmxmutate.Pmxmutate>` class and
    execute the :meth:`launch() <pmx.pmxmutate.Pmxmutate.launch>` method."""
return Pmxmutate(input_structure_path=input_structure_path,
output_structure_path=output_structure_path,
input_b_structure_path=input_b_structure_path,
properties=properties, **kwargs).launch()
def main():
"""Command line execution of this building block. Please check the command line documentation."""
parser = argparse.ArgumentParser(description="Run PMX mutate module",
formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
parser.add_argument('-c', '--config', required=False, help="This file can be a YAML file, JSON file or JSON string")
# Specific args of each building block
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--input_structure_path', required=True, help="Path to the input structure file")
required_args.add_argument('--output_structure_path', required=True, help="Path to the output structure file")
parser.add_argument('--input_b_structure_path', required=False, help="Path to the mutated input structure file")
args = parser.parse_args()
config = args.config if args.config else None
properties = settings.ConfReader(config=config).get_prop_dic()
# Specific call of each building block
pmxmutate(input_structure_path=args.input_structure_path,
output_structure_path=args.output_structure_path,
input_b_structure_path=args.input_b_structure_path,
properties=properties)
if __name__ == '__main__':
main()
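# Hedged command-line sketch (file names and paths below are placeholders, not shipped
# examples): an invocation could look like
#   python pmxmutate.py --config mutate.yml \
#       --input_structure_path frame99.pdb --output_structure_path mutated.pdb
# where mutate.yml would supply properties such as mutation_list and force_field.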
|
py | 7df871d43fb638214a4bf4d462ea8197e9c64552 | import re
import os
import datetime
import yaml
import yamlordereddictloader
from collections import defaultdict
from yaml.representer import Representer
# set up defaultdict representation
yaml.add_representer(defaultdict, Representer.represent_dict)
PHONE_RE = re.compile(r'''^
\D*(1?)\D* # prefix
(\d{3})\D*(\d{3})\D*(\d{4}).*? # main 10 digits
(?:(?:ext|Ext|EXT)\.?\s*\s*(\d{1,4}))? # extension
$''', re.VERBOSE)
def reformat_phone_number(phone):
match = PHONE_RE.match(phone)
if match:
groups = match.groups()
ext = groups[-1]
if ext:
ext = f' ext. {ext}'
else:
ext = ''
if not groups[0]:
groups = groups[1:-1]
else:
groups = groups[:-1]
return '-'.join(groups) + ext
else:
return phone
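# Hedged usage sketch (phone numbers below are made up):
#   reformat_phone_number('(202) 555-0173')           -> '202-555-0173'
#   reformat_phone_number('1 (555) 123-4567 ext. 89') -> '1-555-123-4567 ext. 89'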
def reformat_address(address):
return re.sub(r'\s+', ' ', re.sub(r'\s*\n\s*', ';', address))
def get_data_dir(abbr):
return os.path.join(os.path.dirname(__file__), '../test/', abbr)
def get_jurisdiction_id(abbr):
if abbr == 'dc':
return 'ocd-jurisdiction/country:us/district:dc/government'
elif abbr in ('vi', 'pr'):
return f'ocd-jurisdiction/country:us/territory:{abbr}/government'
else:
return f'ocd-jurisdiction/country:us/state:{abbr}/government'
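# Hedged examples of the mapping above (abbreviations are illustrative):
#   get_jurisdiction_id('nc') -> 'ocd-jurisdiction/country:us/state:nc/government'
#   get_jurisdiction_id('pr') -> 'ocd-jurisdiction/country:us/territory:pr/government'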
def load_yaml(file_obj):
return yaml.load(file_obj, Loader=yamlordereddictloader.SafeLoader)
def dump_obj(obj, *, output_dir=None, filename=None):
if output_dir:
filename = os.path.join(output_dir, get_filename(obj))
if not filename:
raise ValueError('must provide output_dir or filename parameter')
with open(filename, 'w') as f:
yaml.dump(obj, f, default_flow_style=False, Dumper=yamlordereddictloader.SafeDumper)
def get_filename(obj):
id = obj['id'].split('/')[1]
name = obj['name']
    name = re.sub(r'\s+', '-', name)
name = re.sub('[^a-zA-Z-]', '', name)
return f'{name}-{id}.yml'
def role_is_active(role):
now = datetime.datetime.utcnow().date().strftime('%Y-%m-%d')
return role.get('end_date') is None or role.get('end_date') > now
|
py | 7df872a587e580ed68345543d2cee600f4d37a22 | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import furl
from abc import ABC, abstractmethod
import requests
import urllib3
from hsfs.client import exceptions, auth
from hsfs.decorators import connected
urllib3.disable_warnings(urllib3.exceptions.SecurityWarning)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class Client(ABC):
TOKEN_FILE = "token.jwt"
REST_ENDPOINT = "REST_ENDPOINT"
DEFAULT_DATABRICKS_ROOT_VIRTUALENV_ENV = "DEFAULT_DATABRICKS_ROOT_VIRTUALENV_ENV"
@abstractmethod
def __init__(self):
"""To be implemented by clients."""
pass
def _get_verify(self, verify, trust_store_path):
"""Get verification method for sending HTTP requests to Hopsworks.
Credit to https://gist.github.com/gdamjan/55a8b9eec6cf7b771f92021d93b87b2c
:param verify: perform hostname verification, 'true' or 'false'
:type verify: str
:param trust_store_path: path of the truststore locally if it was uploaded manually to
the external environment such as AWS Sagemaker
:type trust_store_path: str
:return: if verify is true and the truststore is provided, then return the trust store location
if verify is true but the truststore wasn't provided, then return true
if verify is false, then return false
:rtype: str or boolean
"""
if verify == "true":
if trust_store_path is not None:
return trust_store_path
else:
return True
return False
def _get_host_port_pair(self):
"""
Removes "http or https" from the rest endpoint and returns a list
[endpoint, port], where endpoint is on the format /path.. without http://
:return: a list [endpoint, port]
:rtype: list
"""
endpoint = self._base_url
if "http" in endpoint:
last_index = endpoint.rfind("/")
endpoint = endpoint[last_index + 1 :]
host, port = endpoint.split(":")
return host, port
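    # Hedged example (the endpoint below is made up): with self._base_url set to
    # "https://hopsworks.example.com:443", _get_host_port_pair() returns the string
    # pair ("hopsworks.example.com", "443").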
def _read_jwt(self):
"""Retrieve jwt from local container."""
with open(self.TOKEN_FILE, "r") as jwt:
return jwt.read()
@connected
def _send_request(
self,
method,
path_params,
query_params=None,
headers=None,
data=None,
stream=False,
):
"""Send REST request to Hopsworks.
Uses the client it is executed from. Path parameters are url encoded automatically.
:param method: 'GET', 'PUT' or 'POST'
:type method: str
:param path_params: a list of path params to build the query url from starting after
the api resource, for example `["project", 119, "featurestores", 67]`.
:type path_params: list
:param query_params: A dictionary of key/value pairs to be added as query parameters,
defaults to None
:type query_params: dict, optional
:param headers: Additional header information, defaults to None
:type headers: dict, optional
:param data: The payload as a python dictionary to be sent as json, defaults to None
:type data: dict, optional
:param stream: Set if response should be a stream, defaults to False
:type stream: boolean, optional
:raises RestAPIError: Raised when request wasn't correctly received, understood or accepted
:return: Response json
:rtype: dict
"""
base_path_params = ["hopsworks-api", "api"]
f_url = furl.furl(self._base_url)
f_url.path.segments = base_path_params + path_params
url = str(f_url)
request = requests.Request(
method,
url=url,
headers=headers,
data=data,
params=query_params,
auth=self._auth,
)
prepped = self._session.prepare_request(request)
response = self._session.send(prepped, verify=self._verify, stream=stream)
if response.status_code == 401 and self.REST_ENDPOINT in os.environ:
# refresh token and retry request - only on hopsworks
self._auth = auth.BearerAuth(self._read_jwt())
# Update request with the new token
request.auth = self._auth
prepped = self._session.prepare_request(request)
response = self._session.send(prepped, verify=self._verify, stream=stream)
if response.status_code // 100 != 2:
raise exceptions.RestAPIError(url, response)
if stream:
return response
else:
            # handle different success response codes
if response.status_code == 204:
return None
return response.json()
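    # Hedged usage sketch (the project and feature-store ids are placeholders): once
    # _base_url, _session, _auth and _verify are configured by a concrete client,
    #   self._send_request("GET", ["project", 119, "featurestores", 67])
    # issues GET <base_url>/hopsworks-api/api/project/119/featurestores/67 and returns
    # the decoded JSON body.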
def _close(self):
"""Closes a client. Can be implemented for clean up purposes, not mandatory."""
self._connected = False
|
py | 7df873ee4f191845e42ef141bd46e31316afd420 | import os
import tkinter as tk
from tkinter import messagebox
from tkinter import PhotoImage
import datetime
from customWidgets import CustomLabel, CustomFrame, CustomEntry, CustomButton
if not os.path.exists('receipts/'):
os.mkdir('receipts/')
with open('receipts/orders.txt', 'w') as file:
file.write('OrderN12121')
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master=master)
self.master = master
self.grid()
self.draw_frames()
self.draw_title_widgets()
self.draw_body_widgets()
self.itemCost = {
'Tea' : 10,
'Coffee' : 15,
            'Pastry' : 80,
'Pizza' : 199,
'Fries' : 60,
'Burger' : 49,
'Pepsi' : 14,
'Cookies' : 4
}
self.discount_dict = {
0:0, 100 : 1, 300 : 2, 500 : 3, 1000 : 5, 2000 : 8, 5000 : 15
}
self.itemList = [item for item in self.itemCost.keys()]
self.entryValues = [tk.StringVar() for item in self.itemList]
self.expression = ''
self.draw_item_frame_widgets()
self.draw_bill_frame_widgets()
self.draw_controller_frame_widgets()
self.draw_calculator_frame_widgets()
def draw_frames(self):
self.title_frame = tk.Frame(self, width=800, height=105, bg='white')
self.main_frame = tk.LabelFrame(self, width=800, height=280, bg='white')
self.title_frame.grid(row=0, column=0)
self.main_frame.grid(row=1, column=0, pady=5)
self.title_frame.grid_propagate(False)
self.main_frame.grid_propagate(False)
def draw_title_widgets(self):
self.icon = tk.Label(self.title_frame, image=coffee_icon1, bg='white')
self.icon.grid(row=0, column=0, rowspan=2, padx=(10,3))
self.title = tk.Label(self.title_frame, width=24, height=2,
text='Honest Bistro Cafe', font=('verdana',22,'bold'), fg="#248aa2", bg="white")
self.title.grid(row=0, column=1, columnspan=3)
self.l1 = tk.Label(self.title_frame, bg='#248aa2', width=25)
self.l2 = tk.Label(self.title_frame, bg='#248aa2', width=5)
self.l1.grid(row=1, column=1)
self.l2.grid(row=1, column=3)
self.date_time = tk.Label(self.title_frame, text=self.get_current_datetime(),
fg='#fe4a49', font=('verdana', 12, 'bold'))
self.date_time.after(1000, self.update_datetime_label)
self.date_time.grid(row=1, column=2, padx=5)
def draw_body_widgets(self):
self.items_frame = CustomFrame(self.main_frame, text='Cafe Items', height=240, width=170)
self.items_frame.grid(row=0, column=0, padx=(15,10), pady=15, rowspan=2, sticky='NW')
self.bill_frame = CustomFrame(self.main_frame, text='Items Bill', height=200, width=192)
self.bill_frame.grid(row=0, column=1, padx=10, pady=(15,0))
self.controller = CustomFrame(self.main_frame, width=192, height=30, borderwidth=0)
self.controller.grid(row=1, column=1, padx=10, sticky='N')
self.calculator = CustomFrame(self.main_frame, text='Calculator', height=200, width=168)
self.calculator.grid(row=0, column=2, padx=5, pady=15, sticky='NW')
self.contact = CustomFrame(self.main_frame, width=192, height=30, borderwidth=0)
self.contact.grid(row=1, column=2, padx=10, sticky='N')
l1 = tk.Label(self.contact, bg='#248aa2', width=25, height=2,
text=' Opens 09:00 to 08:30 \n Contact : 876655444',
font=('Verdana', 8, 'bold'), fg='white', anchor='w')
l1.grid(row=0, column=0)
self.items_frame.grid_propagate(False)
self.bill_frame.grid_propagate(False)
self.controller.grid_propagate(False)
self.calculator.grid_propagate(False)
def draw_item_frame_widgets(self):
for row, item in enumerate(self.itemList):
label = CustomLabel(self.items_frame, text=(' ' + item))
label.grid(row=row, column=0, pady=(4,0))
entry = CustomEntry(self.items_frame, textvariable=self.entryValues[row])
entry.grid(row=row, column=1, pady=(4,0))
def draw_bill_frame_widgets(self):
self.chargeList = ['Items Cost', 'Service Charge', 'GST Charges','Discount', 'Total']
self.chargeValues = [tk.StringVar() for item in self.chargeList]
for row, item in enumerate(self.chargeList):
label = CustomLabel(self.bill_frame, text=item, width=12)
label.grid(row=row, column=0, pady=(3,0))
entry = CustomEntry(self.bill_frame, textvariable=self.chargeValues[row])
entry.grid(row=row, column=1, pady=(3,0))
self.name_frame = tk.LabelFrame(self.bill_frame)
self.name_frame.grid(row=5, column=0, columnspan=2, pady=(15,0), padx=3)
self.customer_name = tk.StringVar()
self.name = CustomLabel(self.name_frame, text='Name', width=7)
self.name_entry = CustomEntry(self.name_frame, width=15, textvariable=self.customer_name)
self.name.grid(row=0, column=0, pady=2, padx=1)
self.name_entry.grid(row=0, column=1, pady=2, padx=1)
def draw_controller_frame_widgets(self):
self.total = CustomButton(self.controller, text='Calculate',
command=self.calculate_payment)
self.total.grid(row=0, column=0, padx=(2,0))
self.clear = CustomButton(self.controller, text='Clear', command=self.clear_all)
self.clear.grid(row=0, column=1, padx=(2,0))
self.receipt = CustomButton(self.controller, text='Receipt', command=self.get_receipt)
self.receipt.grid(row=0, column=2, padx=(2,0))
def draw_calculator_frame_widgets(self):
self.input_string = tk.StringVar()
self.calc_entry = CustomEntry(self.calculator, width=23, textvariable=self.input_string)
self.calc_entry.grid(row=0, column=0, columnspan=4, pady=(5,3), padx=4)
self.seven = self.create_button(self.calculator, '7', '#248aa2', "white",
lambda : self.get('7'), 1, 0)
self.eight = self.create_button(self.calculator, '8', '#248aa2', "white",
lambda : self.get('8'), 1, 1)
self.nine = self.create_button(self.calculator, '9', '#248aa2', "white",
lambda : self.get('9'), 1, 2)
self.plus = self.create_button(self.calculator, '+', 'white', "black",
lambda : self.get('+'), 1, 3)
self.four = self.create_button(self.calculator, '4', '#248aa2', "white",
lambda : self.get('4'), 2, 0)
self.five = self.create_button(self.calculator, '5', '#248aa2', "white",
lambda : self.get('5'), 2, 1)
self.six = self.create_button(self.calculator, '6', '#248aa2', "white",
lambda : self.get('6'), 2, 2)
self.minus = self.create_button(self.calculator, '-', 'white', "black",
lambda : self.get('-'), 2, 3)
self.one = self.create_button(self.calculator, '1', '#248aa2', "white",
lambda : self.get('1'), 3, 0)
self.two = self.create_button(self.calculator, '2', '#248aa2', "white",
lambda : self.get('2'), 3, 1)
self.three = self.create_button(self.calculator, '3', '#248aa2', "white",
lambda : self.get('3'), 3, 2)
self.mult = self.create_button(self.calculator, '*', 'white', "black",
lambda : self.get('*'), 3, 3)
self.clear = self.create_button(self.calculator, 'C', '#248aa2', "white",
self.delete_calc_text, 4, 0)
self.zero = self.create_button(self.calculator, '0', '#248aa2', "white",
lambda : self.get('0'), 4, 1)
self.equal = self.create_button(self.calculator, '=', '#248aa2', "white",
self.evaluate_expression, 4, 2)
self.div = self.create_button(self.calculator, '/', 'white', "black",
lambda : self.get('/'), 4, 3)
def create_button(self, parent, text, bg, fg, command, r, c):
self.button = tk.Button(parent, bg=bg, fg=fg, font=('Arial',10, 'bold'))
self.button['text'] = text
self.button['command'] = command
self.button.config(height=1, width=3)
self.button.grid(row=r, column=c, pady=4)
return self.button
def get(self, value):
ops = ['+', '-', '*', '/']
if self.expression == 'error':
self.expression = ''
if len(self.expression) == 0:
if value in ['+', '-']:
self.expression += value
elif value in ['*', '/']:
pass
else:
self.expression += value
elif len(self.expression) > 0:
if value in ops and self.expression[-1] in ops:
self.expression = self.expression[:-1] + value
else:
self.expression += value
self.input_string.set(self.expression)
def evaluate_expression(self):
if len(self.expression) > 0:
try:
self.expression = str(round(eval(self.expression), 2))
            except Exception:
self.expression = 'error'
self.input_string.set(self.expression)
def delete_calc_text(self):
self.expression = ''
self.calc_entry.delete(0,'end')
def get_current_datetime(self):
dt = datetime.datetime.now()
return dt.strftime('%b %d ,%Y %I:%M:%S %p')
def update_datetime_label(self):
dt = self.get_current_datetime()
self.date_time['text'] = dt
self.date_time.after(1000, self.update_datetime_label)
def get_order_details(self):
total_cost = 0
for index, item in enumerate(self.itemList):
qty = self.entryValues[index].get()
if qty:
self.hasBought = True
cost = int(qty) * self.itemCost[item]
total_cost += cost
if self.hasBought:
service_charge = round(0.04 * total_cost, 2)
gst = round(0.05 * total_cost, 2)
total = total_cost + service_charge + gst
disc = 0
for d in self.discount_dict.keys():
if total >= d:
disc = d
cut = self.discount_dict[disc]
discount = round((cut / 100) * total_cost, 2)
total = round(total - discount, 2)
else:
total_cost, service_charge, gst, discount, total = [0 for i in range(5)]
return total_cost, service_charge, gst, discount, total
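    # Worked example of the charges above (figures are illustrative): for 500 in item
    # cost, service charge = 0.04 * 500 = 20.0 and GST = 0.05 * 500 = 25.0, giving
    # 545.0; the largest discount tier not above 545 is 500 (3%), so the discount is
    # 0.03 * 500 = 15.0 and the final total is 530.0.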
def calculate_payment(self):
self.hasBought = False
total_cost, service_charge, gst, discount, total = self.get_order_details()
if self.hasBought:
self.chargeValues[0].set(str(total_cost))
self.chargeValues[1].set(str(service_charge))
self.chargeValues[2].set(str(gst))
self.chargeValues[3].set(str(discount))
self.chargeValues[4].set(str(total))
def get_receipt(self):
if self.chargeValues[4].get() != '':
total_cost, service_charge, gst, discount, total = [self.chargeValues[i].get()
for i in range(5)]
name = self.customer_name.get()
if name:
current = self.make_entry()
with open(f'receipts/order_receipt_{current}.txt', 'w') as file:
file.write('Order Summary\n')
file.write(f'Customer Name : {name}\n')
file.write(f'Order date time : {self.get_current_datetime()}\n\n')
i, q, p, t = 'item', 'quantity', 'price', 'total cost'
file.write(f' {i:<12} | {q:<8} | {p:<8} | {t:<12}\n')
for index, item in enumerate(self.itemList):
qty = self.entryValues[index].get()
if qty:
self.hasBought = True
c = self.itemCost[item]
cost = int(qty) * c
file.write(f' {item.lower():<12}| {qty:<8} | {c:<8} | {cost:<12}\n')
file.write('\n')
file.write(f'SubCost : {total_cost}\n')
file.write(f'Service Charge : {service_charge}\n')
file.write(f'GST : {gst}\n')
file.write(f'Discount : {discount}\n')
file.write(f'Total Cost : {total}\n')
messagebox.showinfo('Honest Bistro', 'Receipt Generated')
else:
messagebox.showerror('Honest Bistro', 'Customer Name is required.')
else:
messagebox.showerror('Honest Bistro', 'No items Bought')
def make_entry(self):
with open('receipts/orders.txt') as file:
current = file.readline()
current = int(current[6:]) + 1
with open('receipts/orders.txt', 'w') as file:
file.write(('OrderN'+str(current)))
return current
def clear_all(self):
for index, item in enumerate(self.itemList):
self.entryValues[index].set('')
for index, item in enumerate(self.chargeList):
self.chargeValues[index].set('')
self.customer_name.set('')
self.hasBought = False
if __name__ == '__main__':
root = tk.Tk()
root.geometry('600x390')
root.title('Honest Bistro')
root.resizable(0,0)
coffee_icon1 = PhotoImage(file='icons/coffee.png')
app = Application(master=root)
app.mainloop() |
py | 7df87490c609300788b769a78364f21689d1cd90 | """Support for monitoring the Deluge BitTorrent client API."""
import logging
from deluge_client import DelugeRPCClient, FailedToReconnectException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_VARIABLES,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
STATE_IDLE,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
_THROTTLED_REFRESH = None
DEFAULT_NAME = "Deluge"
DEFAULT_PORT = 58846
DHT_UPLOAD = 1000
DHT_DOWNLOAD = 1000
SENSOR_TYPES = {
"current_status": ["Status", None],
"download_speed": ["Down Speed", "kB/s"],
"upload_speed": ["Up Speed", "kB/s"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MONITORED_VARIABLES, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
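# Hedged configuration sketch for configuration.yaml (host and credentials are
# placeholders, not defaults):
#   sensor:
#     - platform: deluge
#       host: 192.168.1.10
#       username: user
#       password: pass
#       monitored_variables:
#         - current_status
#         - download_speed
#         - upload_speed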
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Deluge sensors."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
port = config.get(CONF_PORT)
deluge_api = DelugeRPCClient(host, port, username, password)
try:
deluge_api.connect()
except ConnectionRefusedError:
_LOGGER.error("Connection to Deluge Daemon failed")
raise PlatformNotReady
dev = []
for variable in config[CONF_MONITORED_VARIABLES]:
dev.append(DelugeSensor(variable, deluge_api, name))
add_entities(dev)
class DelugeSensor(Entity):
"""Representation of a Deluge sensor."""
def __init__(self, sensor_type, deluge_client, client_name):
"""Initialize the sensor."""
self._name = SENSOR_TYPES[sensor_type][0]
self.client = deluge_client
self.type = sensor_type
self.client_name = client_name
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self.data = None
self._available = False
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return true if device is available."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from Deluge and updates the state."""
try:
self.data = self.client.call(
"core.get_session_status",
[
"upload_rate",
"download_rate",
"dht_upload_rate",
"dht_download_rate",
],
)
self._available = True
except FailedToReconnectException:
_LOGGER.error("Connection to Deluge Daemon Lost")
self._available = False
return
upload = self.data[b"upload_rate"] - self.data[b"dht_upload_rate"]
download = self.data[b"download_rate"] - self.data[b"dht_download_rate"]
if self.type == "current_status":
if self.data:
if upload > 0 and download > 0:
self._state = "Up/Down"
elif upload > 0 and download == 0:
self._state = "Seeding"
elif upload == 0 and download > 0:
self._state = "Downloading"
else:
self._state = STATE_IDLE
else:
self._state = None
if self.data:
if self.type == "download_speed":
kb_spd = float(download)
kb_spd = kb_spd / 1024
self._state = round(kb_spd, 2 if kb_spd < 0.1 else 1)
elif self.type == "upload_speed":
kb_spd = float(upload)
kb_spd = kb_spd / 1024
self._state = round(kb_spd, 2 if kb_spd < 0.1 else 1)
|
py | 7df874f1ed5a471c24bfad12aa93f8c4c91bc7f8 | # model settings
model = dict(
type='CascadeRCNN',
pretrained=None,
backbone=dict(
type='SwinTransformer',
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
ape=False,
patch_norm=True,
out_indices=(0, 1, 2, 3),
use_checkpoint=False),
neck=dict(
type='FPN',
in_channels=[96, 192, 384, 768],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='CascadeRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=4,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=4,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=4,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
# mask_roi_extractor=dict(
# type='SingleRoIExtractor',
# roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
# out_channels=256,
# featmap_strides=[4, 8, 16, 32]),
# mask_head=dict(
# type='FCNMaskHead',
# num_convs=4,
# in_channels=256,
# conv_out_channels=256,
# num_classes=3,
# loss_mask=dict(
# type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))
),
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100,
mask_thr_binary=0.5)))
|
py | 7df8750eb87d31161e534f95746000b398af658e | # !/usr/bin/python
# -*- coding: utf-8 -*-
from PySide.QtGui import *
from PySide.QtCore import Qt
class HeatMapWidget(QWidget):
def __init__(self, parent=None):
super(HeatMapWidget, self).__init__(parent)
self.electrodesPosition = \
{
"AF3": {"x": 82, "y": 57},
"AF4": {"x": 221, "y": 57},
"F7" : {"x": 35, "y": 104},
"F3" : {"x": 114, "y": 107},
"F4" : {"x": 190, "y": 107},
"F8" : {"x": 269, "y": 104},
"FC5": {"x": 67, "y": 149},
"FC6": {"x": 236, "y": 149},
"T7" : {"x": 18, "y": 197},
"T8" : {"x": 286, "y": 197},
"P7" : {"x": 67, "y": 317},
"P8" : {"x": 236, "y": 317},
"O1" : {"x": 113, "y": 375},
"O2" : {"x": 192, "y": 375}
}
self.headsetColors = \
[
                (0, 0, 255),   # palette runs from blue (lowest readings) to red (highest)
                (21, 0, 234),
                (43, 0, 212),
                (64, 0, 191),
(106, 0, 149),
(128, 0, 128),
(149, 0, 106),
(170, 0, 85),
(191, 0, 64),
(212, 0, 43),
(234, 0, 21),
(255, 0, 0)
]
layout = QHBoxLayout()
self.headsetState = QLabel()
layout.addWidget(self.headsetState)
self.setLayout(layout)
self.minValue = -607
self.maxValue = 3075
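        # The readings span maxValue - minValue = 3682 units; updateHeatMapStatus buckets
        # a value into one of the 12 palette entries with (value - minValue) // 307, e.g.
        # -607 -> index 0 (blue), 0 -> index 1, 3075 -> index 11 (red).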
layout.setAlignment(Qt.AlignHCenter )
self.pixmap = QPixmap("../assets/headset.png")
def updateHeatMapStatus(self, packet):
painter = QPainter()
if painter.begin(self.pixmap):
painter.setFont(QFont('Decorative', 15))
            painter.drawText(self.pixmap.rect(), Qt.AlignCenter, "Heat map")
painter.setFont(QFont('Decorative', 8))
for key in self.electrodesPosition:
if key[0] == "O":
painter.drawText(self.electrodesPosition[key]["x"] - 1, self.electrodesPosition[key]["y"] - 20, 30, 15,
Qt.AlignCenter, key)
elif key == "T7":
painter.drawText(self.electrodesPosition[key]["x"] + 7, self.electrodesPosition[key]["y"] + 32, 30, 15,
Qt.AlignCenter, key)
elif key == "T8":
painter.drawText(self.electrodesPosition[key]["x"] - 9, self.electrodesPosition[key]["y"] + 32, 30, 15,
Qt.AlignCenter, key)
else:
painter.drawText( self.electrodesPosition[key]["x"] - 1, self.electrodesPosition[key]["y"] + 32, 30, 15, Qt.AlignCenter, key)
        if packet is None:
color = self.headsetColors[0]
painter.setBrush(QColor(0,0,0))
for item in self.electrodesPosition:
painter.drawEllipse(self.electrodesPosition[item]["x"], self.electrodesPosition[item]["y"], 28, 28)
else:
for sensor in packet.sensors:
if sensor in self.electrodesPosition:
quality = ( packet.sensors[sensor]['value'] + -1*self.minValue ) // 307
color = self.headsetColors[ quality ]
painter.setBrush(QColor(color[0], color[1], color[2]))
painter.drawEllipse( self.electrodesPosition[sensor]["x"], self.electrodesPosition[sensor]["y"], 28, 28)
painter.end()
self.headsetState.setPixmap(self.pixmap)
|
py | 7df8757c1cd514e1341dc261f10343a4167dfabb | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from transifex.native.django.tools.migrations.templatetags import \
DjangoTagMigrationBuilder
DJANGO_TEMPLATE = """
{% extends 'base.html' %}
{% load i18n %}
{# Translators: This is an amazing comment #}
{% trans "Hello!" %}
{% comment %}Translators: This is another amazing comment{% endcomment %}
{% trans "May" context "month name" %}
{% with first=events.first %}
{% with second=events.second %}
{% blocktrans with third=events.third|title another='foo' %}
The following events were found:
1. {{ first }}
2. {{ second }}
3. {{ third }}
And this is {{ another }}.
{% endblocktrans %}
{% endwith %}
{% endwith %}
{% blocktrans count counter='something'|length %}
There is only one {{ name }} object.
{% plural %}
There are {{ counter }} {{ name }} objects.
{% endblocktrans %}
{% blocktrans trimmed %}
First sentence.
Second paragraph.
{% endblocktrans %}
<a href="{{ url }}">Text</a>
{{ _(some_other_var) }}
{% trans some_other_var|some_filter %}
{% trans "try as" as try %}
{% trans "try with <xml>xml</xml>" %}
{% blocktrans %}
try with <xml>xml</xml>
{% endblocktrans %}
{% blocktrans with organization_name=organization.name %}Help "{{organization_name}}" translate content {% endblocktrans %}
{% blocktrans %}To download, please click <a href="https://{{ site_domain }}{{ url }}">here</a>.{% endblocktrans %}
{% comment "Optional note" %}
<p>Commented out text with {{ create_date|date:"c" }}</p>
{% endcomment %}
{# This is not related to translations #}
{% something %}
{% comment %}This is a non-translator comment{% endcomment %}
{% comment %}Translators: This is an orphan translator comment{% endcomment %}
"""
TRANSIFEX_TEMPLATE = """
{% extends 'base.html' %}
{% load i18n %}
{% load transifex %}
{% t "Hello!" _comment="This is an amazing comment" %}
{% t "May" _comment="This is another amazing comment" _context="month name" %}
{% with first=events.first %}
{% with second=events.second %}
{% t another='foo' third=events.third|title %}
The following events were found:
1. {first}
2. {second}
3. {third}
And this is {another}.
{% endt %}
{% endwith %}
{% endwith %}
{% t counter='something'|length %}
{counter, plural, one {
There is only one {name} object.
} other {
There are {counter} {name} objects.
}}
{% endt %}
{% t |trimmed %}
First sentence.
Second paragraph.
{% endt %}
<a href="{{ url }}">Text</a>
{% t some_other_var %}
{% t some_other_var|some_filter %}
{% t "try as" as try %}
{% ut "try with <xml>xml</xml>" %}
{% ut %}
try with <xml>xml</xml>
{% endut %}
{% ut 'Help "{organization_name}" translate content ' organization_name=organization.name %}
{% ut 'To download, please click <a href="https://{site_domain}{url}">here</a>.' %}
{% comment "Optional note" %}
<p>Commented out text with {{ create_date|date:"c" }}</p>
{% endcomment %}
{# This is not related to translations #}
{% something %}
{% comment %}This is a non-translator comment{% endcomment %}
"""
def test_compiled_string_is_expected():
"""Test all known migration cases."""
builder = DjangoTagMigrationBuilder()
file_migration = builder.build_migration(DJANGO_TEMPLATE)
compiled = file_migration.compile()
assert compiled == TRANSIFEX_TEMPLATE
# Make sure the migration is idempotent
file_migration = builder.build_migration(compiled)
assert file_migration.compile() == TRANSIFEX_TEMPLATE
|
py | 7df875cb713d82a40483d469c74a8e611d54028d | # coding: utf-8
"""
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: v2.12.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.workflows.client.configuration import Configuration
class V1alpha1SuspendTemplate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'duration': 'str'
}
attribute_map = {
'duration': 'duration'
}
def __init__(self, duration=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1SuspendTemplate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._duration = None
self.discriminator = None
if duration is not None:
self.duration = duration
@property
def duration(self):
"""Gets the duration of this V1alpha1SuspendTemplate. # noqa: E501
Duration is the seconds to wait before automatically resuming a template # noqa: E501
:return: The duration of this V1alpha1SuspendTemplate. # noqa: E501
:rtype: str
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this V1alpha1SuspendTemplate.
Duration is the seconds to wait before automatically resuming a template # noqa: E501
:param duration: The duration of this V1alpha1SuspendTemplate. # noqa: E501
:type: str
"""
self._duration = duration
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1SuspendTemplate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1SuspendTemplate):
return True
return self.to_dict() != other.to_dict()
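# A minimal usage sketch (not part of the generated code): the docstrings above
# describe `duration` as the number of seconds to wait before the suspended
# template automatically resumes. Assuming the generated Configuration() default
# constructor needs no arguments, constructing and serializing the model looks
# like this:
if __name__ == "__main__":
    suspend = V1alpha1SuspendTemplate(duration="30")
    assert suspend.to_dict() == {'duration': '30'}
    assert suspend == V1alpha1SuspendTemplate(duration="30")
    print(suspend)  # pprint-formatted dict via to_str()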
|
py | 7df8765e4a67e6e2c0ade95c52af97c700d9e451 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# https://github.com/pypa/setuptools_scm
use_scm = {"write_to": "smart_microscopy_minimal/_version.py"}
setup(
# use_scm_version=use_scm,
    version="0.1.1"
)
|
py | 7df876ffdcb7d5a2ec84c3327c80e87249655e4e |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014-2015 Haltu Oy, http://haltu.fi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import string
import factory
import factory.fuzzy
from selector import models
class UserFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.User
first_name = factory.Sequence(lambda n: 'First{0}'.format(n))
last_name = factory.Sequence(lambda n: 'Last{0}'.format(n))
email = factory.LazyAttribute(lambda u:
'{0}.{1}@example.com'.format(u.first_name, u.last_name))
username = factory.fuzzy.FuzzyText(length=11, chars=string.digits, prefix='1.2.246.562.24.')
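  # Illustrative note (an assumption about typical output, not part of the original
  # module): building an instance, e.g. `user = UserFactory.build()`, yields values
  # such as first_name='First0', email='First0.Last0@example.com', and a username of
  # the form '1.2.246.562.24.' followed by 11 random digits.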
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
|
py | 7df8773cf76c8e5529a8e160fc53104461a82a5d | from configparser import ConfigParser, ExtendedInterpolation
from os import name, system, getcwd, chdir, path
config = None
def input_int_with_limits(message: str, lower_bound: int, upper_bound: int) -> int:
    """Prompt until the user enters an integer strictly between the bounds."""
    while True:
        try:
            option = int(input(message))
            if option <= lower_bound or option >= upper_bound:
                print(f"Please enter a number between {lower_bound+1} and {upper_bound-1}.")
                continue
            return option
        except ValueError:
            print("Please enter a valid integer.")
def clear_terminal():
system('cls' if name == 'nt' else 'clear')
def change_dir():
    # Running from the top-level directory on Windows: move into src/.
    if name == 'nt' and getcwd().endswith("\\blackjack"):
        chdir("src")
def print_banner():
    clear_terminal()
    change_dir()
    with open("assets/.banner.txt", "r") as banner_file:
        for line in banner_file:
            print(line, end="")
def read_config(config_filename: str) -> ConfigParser:
    """Read the config file once and cache the parser in the module-level global."""
    global config
    if config is None:
        config = ConfigParser(interpolation=ExtendedInterpolation())
        config.read(path.join('.', config_filename))
return config |